1 /**************************************************************************
2 *
3 * Copyright 2008 VMware, Inc.
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28 #include "compiler/nir/nir.h"
29 #include "draw/draw_context.h"
30 #include "nir/nir_to_tgsi.h"
31 #include "util/format/u_format.h"
32 #include "util/format/u_format_s3tc.h"
33 #include "util/os_misc.h"
34 #include "util/u_inlines.h"
35 #include "util/u_memory.h"
36 #include "util/u_screen.h"
37 #include "util/u_string.h"
38
39 #include "i915_context.h"
40 #include "i915_debug.h"
41 #include "i915_fpc.h"
42 #include "i915_public.h"
43 #include "i915_reg.h"
44 #include "i915_resource.h"
45 #include "i915_screen.h"
46 #include "i915_winsys.h"
47
48 /*
49 * Probe functions
50 */
51
/* Driver vendor string reported to the state tracker; constant for all
 * screens, so the screen argument is unused.
 */
static const char *
i915_get_vendor(struct pipe_screen *screen)
{
   (void)screen;
   return "Mesa Project";
}
57
/* Hardware vendor string; i915 is always an Intel part, so this is a
 * constant and the screen argument is unused.
 */
static const char *
i915_get_device_vendor(struct pipe_screen *screen)
{
   (void)screen;
   return "Intel";
}
63
64 static const char *
i915_get_name(struct pipe_screen * screen)65 i915_get_name(struct pipe_screen *screen)
66 {
67 static char buffer[128];
68 const char *chipset;
69
70 switch (i915_screen(screen)->iws->pci_id) {
71 case PCI_CHIP_I915_G:
72 chipset = "915G";
73 break;
74 case PCI_CHIP_I915_GM:
75 chipset = "915GM";
76 break;
77 case PCI_CHIP_I945_G:
78 chipset = "945G";
79 break;
80 case PCI_CHIP_I945_GM:
81 chipset = "945GM";
82 break;
83 case PCI_CHIP_I945_GME:
84 chipset = "945GME";
85 break;
86 case PCI_CHIP_G33_G:
87 chipset = "G33";
88 break;
89 case PCI_CHIP_Q35_G:
90 chipset = "Q35";
91 break;
92 case PCI_CHIP_Q33_G:
93 chipset = "Q33";
94 break;
95 case PCI_CHIP_PINEVIEW_G:
96 chipset = "Pineview G";
97 break;
98 case PCI_CHIP_PINEVIEW_M:
99 chipset = "Pineview M";
100 break;
101 default:
102 chipset = "unknown";
103 break;
104 }
105
106 snprintf(buffer, sizeof(buffer), "i915 (chipset: %s)", chipset);
107 return buffer;
108 }
109
/* NIR compiler options used for fragment shaders (see
 * i915_get_compiler_options).  The flags force lowering of operations the
 * i915 fragment backend cannot express and require full unrolling of
 * indirect addressing and loops.
 */
static const nir_shader_compiler_options i915_compiler_options = {
   .fdot_replicates = true,
   .fuse_ffma32 = true,
   .lower_bitops = true, /* required for !CAP_INTEGERS nir_to_tgsi */
   .lower_extract_byte = true,
   .lower_extract_word = true,
   .lower_fdiv = true,
   .lower_fdph = true,
   .lower_flrp32 = true,
   .lower_fmod = true,
   .lower_sincos = true,
   .lower_uniforms_to_ubo = true,
   .lower_vector_cmp = true,
   .use_interpolated_input_intrinsics = true,
   /* No indirect addressing on i915 FS: unroll everything, samplers too. */
   .force_indirect_unrolling = nir_var_all,
   .force_indirect_unrolling_sampler = true,
   .max_unroll_iterations = 32,
   .no_integers = true,
   .has_fused_comp_and_csel = true,
   .has_ddx_intrinsics = true,
};
131
/* NIR compiler options returned for non-fragment stages (see
 * i915_get_compiler_options); those shaders are run through the draw
 * module rather than the i915 hardware FS path.
 */
static const struct nir_shader_compiler_options gallivm_nir_options = {
   .fdot_replicates = true,
   .lower_bitops = true, /* required for !CAP_INTEGERS nir_to_tgsi */
   .lower_scmp = true,
   .lower_flrp32 = true,
   .lower_flrp64 = true,
   .lower_fsat = true,
   .lower_bitfield_insert = true,
   .lower_bitfield_extract = true,
   .lower_fdph = true,
   .lower_ffma16 = true,
   .lower_ffma32 = true,
   .lower_ffma64 = true,
   .lower_fmod = true,
   .lower_hadd = true,
   .lower_uadd_sat = true,
   .lower_usub_sat = true,
   .lower_iadd_sat = true,
   .lower_ldexp = true,
   .lower_pack_snorm_2x16 = true,
   .lower_pack_snorm_4x8 = true,
   .lower_pack_unorm_2x16 = true,
   .lower_pack_unorm_4x8 = true,
   .lower_pack_half_2x16 = true,
   .lower_pack_split = true,
   .lower_unpack_snorm_2x16 = true,
   .lower_unpack_snorm_4x8 = true,
   .lower_unpack_unorm_2x16 = true,
   .lower_unpack_unorm_4x8 = true,
   .lower_unpack_half_2x16 = true,
   .lower_extract_byte = true,
   .lower_extract_word = true,
   .lower_uadd_carry = true,
   .lower_usub_borrow = true,
   .lower_mul_2x32_64 = true,
   .lower_ifind_msb = true,
   .max_unroll_iterations = 32,
   .use_interpolated_input_intrinsics = true,
   .lower_cs_local_index_to_id = true,
   .lower_uniforms_to_ubo = true,
   .lower_vector_cmp = true,
   .lower_device_index_to_zero = true,
   /* .support_16bit_alu = true, */
   .has_ddx_intrinsics = true,
};
177
178 static const void *
i915_get_compiler_options(struct pipe_screen * pscreen,enum pipe_shader_ir ir,enum pipe_shader_type shader)179 i915_get_compiler_options(struct pipe_screen *pscreen, enum pipe_shader_ir ir,
180 enum pipe_shader_type shader)
181 {
182 assert(ir == PIPE_SHADER_IR_NIR);
183 if (shader == PIPE_SHADER_FRAGMENT)
184 return &i915_compiler_options;
185 else
186 return &gallivm_nir_options;
187 }
188
/* Optimization loop for i915 fragment shaders.  Repeats the standard NIR
 * cleanup passes until none reports progress; peephole_select is asked to
 * flatten all IFs and loops are unrolled, since the i915 FS has no control
 * flow (see i915_check_control_flow).  Afterwards, dead function-temp
 * variables are removed and texture loads are grouped.
 */
static void
i915_optimize_nir(struct nir_shader *s)
{
   bool progress;

   do {
      progress = false;

      NIR_PASS_V(s, nir_lower_vars_to_ssa);

      NIR_PASS(progress, s, nir_copy_prop);
      NIR_PASS(progress, s, nir_opt_algebraic);
      NIR_PASS(progress, s, nir_opt_constant_folding);
      NIR_PASS(progress, s, nir_opt_remove_phis);
      NIR_PASS(progress, s, nir_opt_conditional_discard);
      NIR_PASS(progress, s, nir_opt_dce);
      NIR_PASS(progress, s, nir_opt_dead_cf);
      NIR_PASS(progress, s, nir_opt_cse);
      NIR_PASS(progress, s, nir_opt_find_array_copies);
      NIR_PASS(progress, s, nir_opt_if, nir_opt_if_optimize_phi_true_false);
      NIR_PASS(progress, s, nir_opt_peephole_select, ~0 /* flatten all IFs. */,
               true, true);
      NIR_PASS(progress, s, nir_opt_algebraic);
      NIR_PASS(progress, s, nir_opt_constant_folding);
      NIR_PASS(progress, s, nir_opt_shrink_stores, true);
      NIR_PASS(progress, s, nir_opt_shrink_vectors, false);
      NIR_PASS(progress, s, nir_opt_loop);
      NIR_PASS(progress, s, nir_opt_undef);
      NIR_PASS(progress, s, nir_opt_loop_unroll);

   } while (progress);

   NIR_PASS(progress, s, nir_remove_dead_variables, nir_var_function_temp,
            NULL);

   /* Group texture loads together to try to avoid hitting the
    * texture indirection phase limit.
    */
   NIR_PASS_V(s, nir_group_loads, nir_group_all, ~0);
}
229
230 static char *
i915_check_control_flow(nir_shader * s)231 i915_check_control_flow(nir_shader *s)
232 {
233 if (s->info.stage == MESA_SHADER_FRAGMENT) {
234 nir_function_impl *impl = nir_shader_get_entrypoint(s);
235 nir_block *first = nir_start_block(impl);
236 nir_cf_node *next = nir_cf_node_next(&first->cf_node);
237
238 if (next) {
239 switch (next->type) {
240 case nir_cf_node_if:
241 return "if/then statements not supported by i915 fragment shaders, "
242 "should have been flattened by peephole_select.";
243 case nir_cf_node_loop:
244 return "looping not supported i915 fragment shaders, all loops "
245 "must be statically unrollable.";
246 default:
247 return "Unknown control flow type";
248 }
249 }
250 }
251
252 return NULL;
253 }
254
/* pipe_screen::finalize_nir hook.
 *
 * Optimizes fragment shaders, strips storage-occupying uniforms (keeping
 * samplers/images), validates that no unsupported control flow remains, and
 * test-compiles fragment shaders.  Returns NULL on success or a malloc'ed
 * error message (note the strdup below) that the caller owns.
 */
static char *
i915_finalize_nir(struct pipe_screen *pscreen, void *nir)
{
   nir_shader *s = nir;

   if (s->info.stage == MESA_SHADER_FRAGMENT)
      i915_optimize_nir(s);

   /* st_program.c's parameter list optimization requires that future nir
    * variants don't reallocate the uniform storage, so we have to remove
    * uniforms that occupy storage. But we don't want to remove samplers,
    * because they're needed for YUV variant lowering.
    */
   nir_remove_dead_derefs(s);
   nir_foreach_uniform_variable_safe (var, s) {
      /* Keep sampler/image uniforms; drop everything else. */
      if (var->data.mode == nir_var_uniform &&
          (glsl_type_get_image_count(var->type) ||
           glsl_type_get_sampler_count(var->type)))
         continue;

      exec_node_remove(&var->node);
   }
   nir_validate_shader(s, "after uniform var removal");

   nir_sweep(s);

   char *msg = i915_check_control_flow(s);
   if (msg) {
      /* Dump the failing shader when FS debugging is enabled (internal
       * shaders only with NIR_DEBUG=print_internal).
       */
      if (I915_DBG_ON(DBG_FS) && (!s->info.internal || NIR_DEBUG(PRINT_INTERNAL))) {
         mesa_logi("failing shader:");
         nir_log_shaderi(s);
      }
      return strdup(msg);
   }

   if (s->info.stage == MESA_SHADER_FRAGMENT)
      return i915_test_fragment_shader_compile(pscreen, s);
   else
      return NULL;
}
295
/* pipe_screen::get_shader_param hook: report per-stage shader capabilities.
 *
 * Caps that must agree across all stages are handled first; then vertex
 * shader caps defer to the draw module, and fragment shader caps reflect
 * the i915 hardware limits.
 */
static int
i915_get_shader_param(struct pipe_screen *screen, enum pipe_shader_type shader,
                      enum pipe_shader_cap cap)
{
   /* Caps that are identical for every stage. */
   switch (cap) {
   case PIPE_SHADER_CAP_SUPPORTED_IRS:
      return (1 << PIPE_SHADER_IR_NIR) | (1 << PIPE_SHADER_IR_TGSI);

   case PIPE_SHADER_CAP_INTEGERS:
      /* mesa/st requires that this cap is the same across stages, and the FS
       * can't do ints.
       */
      return 0;

   /* i915 can't do these, and even if gallivm NIR can we call nir_to_tgsi
    * manually and TGSI can't.
    */
   case PIPE_SHADER_CAP_INT16:
   case PIPE_SHADER_CAP_FP16:
   case PIPE_SHADER_CAP_FP16_DERIVATIVES:
   case PIPE_SHADER_CAP_FP16_CONST_BUFFERS:
      return 0;

   case PIPE_SHADER_CAP_INDIRECT_TEMP_ADDR:
      /* While draw could normally handle this for the VS, the NIR lowering
       * to regs can't handle our non-native-integers, so we have to lower to
       * if ladders.
       */
      return 0;

   default:
      break;
   }

   /* Stage-specific caps. */
   switch (shader) {
   case PIPE_SHADER_VERTEX:
      switch (cap) {
      case PIPE_SHADER_CAP_MAX_TEXTURE_SAMPLERS:
      case PIPE_SHADER_CAP_MAX_SAMPLER_VIEWS:
         return 0;
      case PIPE_SHADER_CAP_MAX_SHADER_BUFFERS:
      case PIPE_SHADER_CAP_MAX_SHADER_IMAGES:
         return 0;
      default:
         /* VS runs in the draw module; let it report its own limits. */
         return draw_get_shader_param(shader, cap);
      }
   case PIPE_SHADER_FRAGMENT:
      /* XXX: some of these are just shader model 2.0 values, fix this! */
      switch (cap) {
      case PIPE_SHADER_CAP_MAX_INSTRUCTIONS:
         return I915_MAX_ALU_INSN + I915_MAX_TEX_INSN;
      case PIPE_SHADER_CAP_MAX_ALU_INSTRUCTIONS:
         return I915_MAX_ALU_INSN;
      case PIPE_SHADER_CAP_MAX_TEX_INSTRUCTIONS:
         return I915_MAX_TEX_INSN;
      case PIPE_SHADER_CAP_MAX_TEX_INDIRECTIONS:
         return 4;
      case PIPE_SHADER_CAP_MAX_CONTROL_FLOW_DEPTH:
         return 0;
      case PIPE_SHADER_CAP_MAX_INPUTS:
         return 10;
      case PIPE_SHADER_CAP_MAX_OUTPUTS:
         return 1;
      case PIPE_SHADER_CAP_MAX_CONST_BUFFER0_SIZE:
         return 32 * sizeof(float[4]);
      case PIPE_SHADER_CAP_MAX_CONST_BUFFERS:
         return 1;
      case PIPE_SHADER_CAP_MAX_TEMPS:
         /* 16 inter-phase temps, 3 intra-phase temps. i915c reported 16. too. */
         return 16;
      case PIPE_SHADER_CAP_CONT_SUPPORTED:
      case PIPE_SHADER_CAP_TGSI_SQRT_SUPPORTED:
         return 0;
      case PIPE_SHADER_CAP_INDIRECT_INPUT_ADDR:
      case PIPE_SHADER_CAP_INDIRECT_OUTPUT_ADDR:
      case PIPE_SHADER_CAP_INDIRECT_TEMP_ADDR:
      case PIPE_SHADER_CAP_INDIRECT_CONST_ADDR:
      case PIPE_SHADER_CAP_SUBROUTINES:
         return 0;
      case PIPE_SHADER_CAP_INT64_ATOMICS:
      case PIPE_SHADER_CAP_INT16:
      case PIPE_SHADER_CAP_GLSL_16BIT_CONSTS:
         return 0;
      case PIPE_SHADER_CAP_MAX_TEXTURE_SAMPLERS:
      case PIPE_SHADER_CAP_MAX_SAMPLER_VIEWS:
         return I915_TEX_UNITS;
      case PIPE_SHADER_CAP_TGSI_ANY_INOUT_DECL_RANGE:
      case PIPE_SHADER_CAP_MAX_SHADER_BUFFERS:
      case PIPE_SHADER_CAP_MAX_SHADER_IMAGES:
      case PIPE_SHADER_CAP_MAX_HW_ATOMIC_COUNTERS:
      case PIPE_SHADER_CAP_MAX_HW_ATOMIC_COUNTER_BUFFERS:
         return 0;

      default:
         debug_printf("%s: Unknown cap %u.\n", __func__, cap);
         return 0;
      }
      break;
   default:
      /* No geometry/compute/etc. support. */
      return 0;
   }
}
398
/* pipe_screen::get_param hook: report screen-wide capabilities.  Unknown
 * caps fall through to the Gallium defaults helper.
 */
static int
i915_get_param(struct pipe_screen *screen, enum pipe_cap cap)
{
   struct i915_screen *is = i915_screen(screen);

   switch (cap) {
   /* Supported features (boolean caps). */
   case PIPE_CAP_ANISOTROPIC_FILTER:
   case PIPE_CAP_NPOT_TEXTURES:
   case PIPE_CAP_MIXED_FRAMEBUFFER_SIZES:
   case PIPE_CAP_PRIMITIVE_RESTART: /* draw module */
   case PIPE_CAP_PRIMITIVE_RESTART_FIXED_INDEX:
   case PIPE_CAP_VERTEX_ELEMENT_INSTANCE_DIVISOR:
   case PIPE_CAP_BLEND_EQUATION_SEPARATE:
   case PIPE_CAP_VS_INSTANCEID:
   case PIPE_CAP_VERTEX_COLOR_CLAMPED:
   case PIPE_CAP_USER_VERTEX_BUFFERS:
   case PIPE_CAP_MIXED_COLOR_DEPTH_BITS:
   case PIPE_CAP_TGSI_TEXCOORD:
      return 1;

   /* Unsupported features (boolean caps). */
   case PIPE_CAP_TEXTURE_TRANSFER_MODES:
   case PIPE_CAP_PCI_GROUP:
   case PIPE_CAP_PCI_BUS:
   case PIPE_CAP_PCI_DEVICE:
   case PIPE_CAP_PCI_FUNCTION:
      return 0;

   case PIPE_CAP_ALLOW_MAPPED_BUFFERS_DURING_EXECUTION:
      return 0;

   case PIPE_CAP_SHAREABLE_SHADERS:
      /* Can't expose shareable shaders because the draw shaders reference the
       * draw module's state, which is per-context.
       */
      return 0;

   case PIPE_CAP_MAX_GS_INVOCATIONS:
      return 32;

   case PIPE_CAP_MAX_SHADER_BUFFER_SIZE_UINT:
      return 1 << 27;

   case PIPE_CAP_MAX_VIEWPORTS:
      return 1;

   case PIPE_CAP_MIN_MAP_BUFFER_ALIGNMENT:
      return 64;

   case PIPE_CAP_GLSL_FEATURE_LEVEL:
   case PIPE_CAP_GLSL_FEATURE_LEVEL_COMPATIBILITY:
      return 120;

   case PIPE_CAP_CONSTANT_BUFFER_OFFSET_ALIGNMENT:
      return 16;

   /* Texturing. */
   case PIPE_CAP_MAX_TEXTURE_2D_SIZE:
      return 1 << (I915_MAX_TEXTURE_2D_LEVELS - 1);
   case PIPE_CAP_MAX_TEXTURE_3D_LEVELS:
      return I915_MAX_TEXTURE_3D_LEVELS;
   case PIPE_CAP_MAX_TEXTURE_CUBE_LEVELS:
      return I915_MAX_TEXTURE_2D_LEVELS;

   /* Render targets. */
   case PIPE_CAP_MAX_RENDER_TARGETS:
      return 1;

   case PIPE_CAP_MAX_VERTEX_ATTRIB_STRIDE:
      return 2048;

   /* Fragment coordinate conventions. */
   case PIPE_CAP_FS_COORD_ORIGIN_UPPER_LEFT:
   case PIPE_CAP_FS_COORD_PIXEL_CENTER_HALF_INTEGER:
      return 1;
   case PIPE_CAP_ENDIANNESS:
      return PIPE_ENDIAN_LITTLE;
   case PIPE_CAP_MAX_VARYINGS:
      return 10;

   case PIPE_CAP_NIR_IMAGES_AS_DEREF:
      return 0;

   case PIPE_CAP_VENDOR_ID:
      /* Intel's PCI vendor id. */
      return 0x8086;
   case PIPE_CAP_DEVICE_ID:
      return is->iws->pci_id;
   case PIPE_CAP_ACCELERATED:
      return 1;
   case PIPE_CAP_VIDEO_MEMORY: {
      /* Once a batch uses more than 75% of the maximum mappable size, we
       * assume that there's some fragmentation, and we start doing extra
       * flushing, etc. That's the big cliff apps will care about.
       */
      const int gpu_mappable_megabytes =
         is->iws->aperture_size(is->iws) * 3 / 4;
      uint64_t system_memory;

      if (!os_get_total_physical_memory(&system_memory))
         return 0;

      /* Report the smaller of mappable aperture and physical RAM (in MB). */
      return MIN2(gpu_mappable_megabytes, (int)(system_memory >> 20));
   }
   case PIPE_CAP_UMA:
      /* Integrated graphics: shares system memory. */
      return 1;

   default:
      return u_pipe_screen_get_param_defaults(screen, cap);
   }
}
509
510 static float
i915_get_paramf(struct pipe_screen * screen,enum pipe_capf cap)511 i915_get_paramf(struct pipe_screen *screen, enum pipe_capf cap)
512 {
513 switch (cap) {
514 case PIPE_CAPF_MIN_LINE_WIDTH:
515 case PIPE_CAPF_MIN_LINE_WIDTH_AA:
516 case PIPE_CAPF_MIN_POINT_SIZE:
517 case PIPE_CAPF_MIN_POINT_SIZE_AA:
518 return 1;
519
520 case PIPE_CAPF_POINT_SIZE_GRANULARITY:
521 case PIPE_CAPF_LINE_WIDTH_GRANULARITY:
522 return 0.1;
523
524 case PIPE_CAPF_MAX_LINE_WIDTH:
525 FALLTHROUGH;
526 case PIPE_CAPF_MAX_LINE_WIDTH_AA:
527 return 7.5;
528
529 case PIPE_CAPF_MAX_POINT_SIZE:
530 FALLTHROUGH;
531 case PIPE_CAPF_MAX_POINT_SIZE_AA:
532 return 255.0;
533
534 case PIPE_CAPF_MAX_TEXTURE_ANISOTROPY:
535 return 4.0;
536
537 case PIPE_CAPF_MAX_TEXTURE_LOD_BIAS:
538 return 16.0;
539
540 case PIPE_CAPF_MIN_CONSERVATIVE_RASTER_DILATE:
541 FALLTHROUGH;
542 case PIPE_CAPF_MAX_CONSERVATIVE_RASTER_DILATE:
543 FALLTHROUGH;
544 case PIPE_CAPF_CONSERVATIVE_RASTER_DILATE_GRANULARITY:
545 return 0.0f;
546
547 default:
548 debug_printf("%s: Unknown cap %u.\n", __func__, cap);
549 return 0;
550 }
551 }
552
553 bool
i915_is_format_supported(struct pipe_screen * screen,enum pipe_format format,enum pipe_texture_target target,unsigned sample_count,unsigned storage_sample_count,unsigned tex_usage)554 i915_is_format_supported(struct pipe_screen *screen, enum pipe_format format,
555 enum pipe_texture_target target, unsigned sample_count,
556 unsigned storage_sample_count, unsigned tex_usage)
557 {
558 static const enum pipe_format tex_supported[] = {
559 PIPE_FORMAT_B8G8R8A8_UNORM, PIPE_FORMAT_B8G8R8A8_SRGB,
560 PIPE_FORMAT_B8G8R8X8_UNORM, PIPE_FORMAT_R8G8B8A8_UNORM,
561 PIPE_FORMAT_R8G8B8X8_UNORM, PIPE_FORMAT_B4G4R4A4_UNORM,
562 PIPE_FORMAT_B5G6R5_UNORM, PIPE_FORMAT_B5G5R5A1_UNORM,
563 PIPE_FORMAT_B10G10R10A2_UNORM, PIPE_FORMAT_L8_UNORM, PIPE_FORMAT_A8_UNORM,
564 PIPE_FORMAT_I8_UNORM, PIPE_FORMAT_L8A8_UNORM, PIPE_FORMAT_UYVY,
565 PIPE_FORMAT_YUYV,
566 /* XXX why not?
567 PIPE_FORMAT_Z16_UNORM, */
568 PIPE_FORMAT_DXT1_RGB, PIPE_FORMAT_DXT1_SRGB, PIPE_FORMAT_DXT1_RGBA,
569 PIPE_FORMAT_DXT1_SRGBA, PIPE_FORMAT_DXT3_RGBA, PIPE_FORMAT_DXT3_SRGBA,
570 PIPE_FORMAT_DXT5_RGBA, PIPE_FORMAT_DXT5_SRGBA, PIPE_FORMAT_Z24X8_UNORM,
571 PIPE_FORMAT_FXT1_RGB, PIPE_FORMAT_FXT1_RGBA,
572 PIPE_FORMAT_Z24_UNORM_S8_UINT, PIPE_FORMAT_NONE /* list terminator */
573 };
574 static const enum pipe_format render_supported[] = {
575 PIPE_FORMAT_B8G8R8A8_UNORM, PIPE_FORMAT_B8G8R8X8_UNORM,
576 PIPE_FORMAT_R8G8B8A8_UNORM, PIPE_FORMAT_R8G8B8X8_UNORM,
577 PIPE_FORMAT_B5G6R5_UNORM, PIPE_FORMAT_B5G5R5A1_UNORM,
578 PIPE_FORMAT_B4G4R4A4_UNORM, PIPE_FORMAT_B10G10R10A2_UNORM,
579 PIPE_FORMAT_L8_UNORM, PIPE_FORMAT_A8_UNORM,
580 PIPE_FORMAT_I8_UNORM, PIPE_FORMAT_NONE /* list terminator */
581 };
582 static const enum pipe_format depth_supported[] = {
583 /* XXX why not?
584 PIPE_FORMAT_Z16_UNORM, */
585 PIPE_FORMAT_Z24X8_UNORM, PIPE_FORMAT_Z24_UNORM_S8_UINT,
586 PIPE_FORMAT_NONE /* list terminator */
587 };
588 const enum pipe_format *list;
589 uint32_t i;
590
591 if (sample_count > 1)
592 return false;
593
594 if (MAX2(1, sample_count) != MAX2(1, storage_sample_count))
595 return false;
596
597 if (tex_usage & PIPE_BIND_DEPTH_STENCIL)
598 list = depth_supported;
599 else if (tex_usage & PIPE_BIND_RENDER_TARGET)
600 list = render_supported;
601 else if (tex_usage & PIPE_BIND_SAMPLER_VIEW)
602 list = tex_supported;
603 else
604 return true; /* PIPE_BIND_{VERTEX,INDEX}_BUFFER */
605
606 for (i = 0; list[i] != PIPE_FORMAT_NONE; i++) {
607 if (list[i] == format)
608 return true;
609 }
610
611 return false;
612 }
613
614 /*
615 * Fence functions
616 */
617
618 static void
i915_fence_reference(struct pipe_screen * screen,struct pipe_fence_handle ** ptr,struct pipe_fence_handle * fence)619 i915_fence_reference(struct pipe_screen *screen, struct pipe_fence_handle **ptr,
620 struct pipe_fence_handle *fence)
621 {
622 struct i915_screen *is = i915_screen(screen);
623
624 is->iws->fence_reference(is->iws, ptr, fence);
625 }
626
627 static bool
i915_fence_finish(struct pipe_screen * screen,struct pipe_context * ctx,struct pipe_fence_handle * fence,uint64_t timeout)628 i915_fence_finish(struct pipe_screen *screen, struct pipe_context *ctx,
629 struct pipe_fence_handle *fence, uint64_t timeout)
630 {
631 struct i915_screen *is = i915_screen(screen);
632
633 if (!timeout)
634 return is->iws->fence_signalled(is->iws, fence) == 1;
635
636 return is->iws->fence_finish(is->iws, fence) == 1;
637 }
638
639 /*
640 * Generic functions
641 */
642
643 static void
i915_destroy_screen(struct pipe_screen * screen)644 i915_destroy_screen(struct pipe_screen *screen)
645 {
646 struct i915_screen *is = i915_screen(screen);
647
648 if (is->iws)
649 is->iws->destroy(is->iws);
650
651 FREE(is);
652 }
653
654 static int
i915_screen_get_fd(struct pipe_screen * screen)655 i915_screen_get_fd(struct pipe_screen *screen)
656 {
657 struct i915_screen *is = i915_screen(screen);
658
659 return is->iws->get_fd(is->iws);
660 }
661
662 /**
663 * Create a new i915_screen object
664 */
665 struct pipe_screen *
i915_screen_create(struct i915_winsys * iws)666 i915_screen_create(struct i915_winsys *iws)
667 {
668 struct i915_screen *is = CALLOC_STRUCT(i915_screen);
669
670 if (!is)
671 return NULL;
672
673 switch (iws->pci_id) {
674 case PCI_CHIP_I915_G:
675 case PCI_CHIP_I915_GM:
676 is->is_i945 = false;
677 break;
678
679 case PCI_CHIP_I945_G:
680 case PCI_CHIP_I945_GM:
681 case PCI_CHIP_I945_GME:
682 case PCI_CHIP_G33_G:
683 case PCI_CHIP_Q33_G:
684 case PCI_CHIP_Q35_G:
685 case PCI_CHIP_PINEVIEW_G:
686 case PCI_CHIP_PINEVIEW_M:
687 is->is_i945 = true;
688 break;
689
690 default:
691 debug_printf("%s: unknown pci id 0x%x, cannot create screen\n", __func__,
692 iws->pci_id);
693 FREE(is);
694 return NULL;
695 }
696
697 is->iws = iws;
698
699 is->base.destroy = i915_destroy_screen;
700
701 is->base.get_name = i915_get_name;
702 is->base.get_vendor = i915_get_vendor;
703 is->base.get_device_vendor = i915_get_device_vendor;
704 is->base.get_screen_fd = i915_screen_get_fd;
705 is->base.get_param = i915_get_param;
706 is->base.get_shader_param = i915_get_shader_param;
707 is->base.get_paramf = i915_get_paramf;
708 is->base.get_compiler_options = i915_get_compiler_options;
709 is->base.finalize_nir = i915_finalize_nir;
710 is->base.is_format_supported = i915_is_format_supported;
711
712 is->base.context_create = i915_create_context;
713
714 is->base.fence_reference = i915_fence_reference;
715 is->base.fence_finish = i915_fence_finish;
716
717 i915_init_screen_resource_functions(is);
718
719 i915_debug_init(is);
720
721 return &is->base;
722 }
723