1 /*
2 * Copyright (c) 2008-2024 Broadcom. All Rights Reserved.
3 * The term “Broadcom” refers to Broadcom Inc.
4 * and/or its subsidiaries.
5 * SPDX-License-Identifier: MIT
6 */
7
8 #include "util/u_inlines.h"
9 #include "util/u_memory.h"
10 #include "pipe/p_defines.h"
11 #include "util/u_math.h"
12
13 #include "svga_resource_texture.h"
14 #include "svga_sampler_view.h"
15 #include "svga_winsys.h"
16 #include "svga_context.h"
17 #include "svga_shader.h"
18 #include "svga_state.h"
19 #include "svga_cmd.h"
20
21
22 /**
23 * Called when tearing down a context to free resources and samplers.
24 */
25 void
svga_cleanup_tss_binding(struct svga_context * svga)26 svga_cleanup_tss_binding(struct svga_context *svga)
27 {
28 const enum pipe_shader_type shader = PIPE_SHADER_FRAGMENT;
29 unsigned i;
30
31 for (i = 0; i < ARRAY_SIZE(svga->state.hw_draw.views); i++) {
32 struct svga_hw_view_state *view = &svga->state.hw_draw.views[i];
33 if (view) {
34 svga_sampler_view_reference(&view->v, NULL);
35 pipe_sampler_view_reference(&svga->curr.sampler_views[shader][i],
36 NULL);
37 pipe_resource_reference(&view->texture, NULL);
38 view->dirty = true;
39 }
40 }
41 }
42
43
/**
 * A list of (unit, view) texture bindings to be sent to the device in
 * one SVGA3D SetTextureState command.
 */
struct bind_queue {
   struct {
      unsigned unit;                    /* texture stage / sampler unit index */
      struct svga_hw_view_state *view;  /* cached hw view to bind (v may be NULL) */
   } bind[PIPE_MAX_SAMPLERS];

   unsigned bind_count;                 /* number of valid entries in bind[] */
};
52
53
54 /**
55 * Update the texture binding for one texture unit.
56 */
57 static void
emit_tex_binding_unit(struct svga_context * svga,unsigned unit,const struct svga_sampler_state * s,const struct pipe_sampler_view * sv,struct svga_hw_view_state * view,bool reemit,struct bind_queue * queue)58 emit_tex_binding_unit(struct svga_context *svga,
59 unsigned unit,
60 const struct svga_sampler_state *s,
61 const struct pipe_sampler_view *sv,
62 struct svga_hw_view_state *view,
63 bool reemit,
64 struct bind_queue *queue)
65 {
66 struct pipe_resource *texture = NULL;
67 unsigned last_level, min_lod, max_lod;
68
69 /* get min max lod */
70 if (sv && s) {
71 if (s->mipfilter == SVGA3D_TEX_FILTER_NONE) {
72 /* just use the base level image */
73 min_lod = max_lod = sv->u.tex.first_level;
74 }
75 else {
76 last_level = MIN2(sv->u.tex.last_level, sv->texture->last_level);
77 min_lod = s->view_min_lod + sv->u.tex.first_level;
78 min_lod = MIN2(min_lod, last_level);
79 max_lod = MIN2(s->view_max_lod + sv->u.tex.first_level, last_level);
80 }
81 texture = sv->texture;
82 }
83 else {
84 min_lod = 0;
85 max_lod = 0;
86 }
87
88 if (view->texture != texture ||
89 view->min_lod != min_lod ||
90 view->max_lod != max_lod) {
91
92 svga_sampler_view_reference(&view->v, NULL);
93 pipe_resource_reference(&view->texture, texture);
94
95 view->dirty = true;
96 view->min_lod = min_lod;
97 view->max_lod = max_lod;
98
99 if (texture) {
100 view->v = svga_get_tex_sampler_view(&svga->pipe,
101 texture,
102 min_lod,
103 max_lod);
104 }
105 }
106
107 /*
108 * We need to reemit non-null texture bindings, even when they are not
109 * dirty, to ensure that the resources are paged in.
110 */
111 if (view->dirty || (reemit && view->v)) {
112 queue->bind[queue->bind_count].unit = unit;
113 queue->bind[queue->bind_count].view = view;
114 queue->bind_count++;
115 }
116
117 if (!view->dirty && view->v) {
118 svga_validate_sampler_view(svga, view->v);
119 }
120 }
121
122
/**
 * Emit fragment-stage texture bindings for the legacy (non-VGPU10) path.
 *
 * Walks the union of the currently-bound sampler views and the views the
 * hardware still holds (so stale units get unbound), queues every unit
 * that changed or must be re-paged-in, and issues a single
 * SetTextureState command for the whole queue.
 *
 * \param dirty  unused here; present to match the svga_tracked_state
 *               update callback signature.
 * \return PIPE_OK, or PIPE_ERROR_OUT_OF_MEMORY if command-buffer space
 *         could not be reserved.
 */
static enum pipe_error
update_tss_binding(struct svga_context *svga, uint64_t dirty )
{
   const enum pipe_shader_type shader = PIPE_SHADER_FRAGMENT;
   bool reemit = svga->rebind.flags.texture_samplers;
   unsigned i;
   /* Cover both newly-bound units and units the hw still has bound. */
   unsigned count = MAX2(svga->curr.num_sampler_views[shader],
                         svga->state.hw_draw.num_views);

   struct bind_queue queue;

   assert(!svga_have_vgpu10(svga));

   queue.bind_count = 0;

   for (i = 0; i < count; i++) {
      emit_tex_binding_unit(svga, i,
                            svga->curr.sampler[shader][i],
                            svga->curr.sampler_views[shader][i],
                            &svga->state.hw_draw.views[i],
                            reemit,
                            &queue);
   }

   svga->state.hw_draw.num_views = svga->curr.num_sampler_views[shader];

   /* Polygon stipple: the fragment shader variant reserves a sampler
    * unit for the stipple pattern texture.
    */
   if (svga->curr.rast->templ.poly_stipple_enable) {
      const unsigned unit =
         svga_fs_variant(svga->state.hw_draw.fs)->pstipple_sampler_unit;
      emit_tex_binding_unit(svga, unit,
                            svga->polygon_stipple.sampler,
                            &svga->polygon_stipple.sampler_view->base,
                            &svga->state.hw_draw.views[unit],
                            reemit,
                            &queue);
   }

   svga->state.hw_draw.num_backed_views = 0;

   if (queue.bind_count) {
      SVGA3dTextureState *ts;

      if (SVGA3D_BeginSetTextureState(svga->swc, &ts,
                                      queue.bind_count) != PIPE_OK)
         goto fail;

      for (i = 0; i < queue.bind_count; i++) {
         struct svga_winsys_surface *handle;
         struct svga_hw_view_state *view = queue.bind[i].view;

         ts[i].stage = queue.bind[i].unit;
         ts[i].name = SVGA3D_TS_BIND_TEXTURE;

         if (view->v) {
            handle = view->v->handle;

            /* Keep track of number of views with a backing copy
             * of texture.
             */
            if (handle != svga_texture(view->texture)->handle)
               svga->state.hw_draw.num_backed_views++;
         }
         else {
            handle = NULL;
         }
         /* Relocation lets the winsys patch in the surface id and keeps
          * the surface resident for the duration of the command buffer.
          */
         svga->swc->surface_relocation(svga->swc,
                                       &ts[i].value,
                                       NULL,
                                       handle,
                                       SVGA_RELOC_READ);

         /* The binding has been emitted; clear the dirty flag. */
         queue.bind[i].view->dirty = false;
      }

      SVGA_FIFOCommitAll(svga->swc);
   }

   svga->rebind.flags.texture_samplers = false;

   return PIPE_OK;

fail:
   return PIPE_ERROR_OUT_OF_MEMORY;
}
208
209
210 /*
211 * Rebind textures.
212 *
213 * Similar to update_tss_binding, but without any state checking/update.
214 *
215 * Called at the beginning of every new command buffer to ensure that
216 * non-dirty textures are properly paged-in.
217 */
218 enum pipe_error
svga_reemit_tss_bindings(struct svga_context * svga)219 svga_reemit_tss_bindings(struct svga_context *svga)
220 {
221 unsigned i;
222 enum pipe_error ret;
223 struct bind_queue queue;
224
225 assert(!svga_have_vgpu10(svga));
226 assert(svga->rebind.flags.texture_samplers);
227
228 queue.bind_count = 0;
229
230 for (i = 0; i < svga->state.hw_draw.num_views; i++) {
231 struct svga_hw_view_state *view = &svga->state.hw_draw.views[i];
232
233 if (view->v) {
234 queue.bind[queue.bind_count].unit = i;
235 queue.bind[queue.bind_count].view = view;
236 queue.bind_count++;
237 }
238 }
239
240 /* Polygon stipple */
241 if (svga->curr.rast && svga->curr.rast->templ.poly_stipple_enable) {
242 const unsigned unit =
243 svga_fs_variant(svga->state.hw_draw.fs)->pstipple_sampler_unit;
244 struct svga_hw_view_state *view = &svga->state.hw_draw.views[unit];
245
246 if (view->v) {
247 queue.bind[queue.bind_count].unit = unit;
248 queue.bind[queue.bind_count].view = view;
249 queue.bind_count++;
250 }
251 }
252
253 if (queue.bind_count) {
254 SVGA3dTextureState *ts;
255
256 ret = SVGA3D_BeginSetTextureState(svga->swc, &ts, queue.bind_count);
257 if (ret != PIPE_OK) {
258 return ret;
259 }
260
261 for (i = 0; i < queue.bind_count; i++) {
262 struct svga_winsys_surface *handle;
263
264 ts[i].stage = queue.bind[i].unit;
265 ts[i].name = SVGA3D_TS_BIND_TEXTURE;
266
267 assert(queue.bind[i].view->v);
268 handle = queue.bind[i].view->v->handle;
269 svga->swc->surface_relocation(svga->swc,
270 &ts[i].value,
271 NULL,
272 handle,
273 SVGA_RELOC_READ);
274 }
275
276 SVGA_FIFOCommitAll(svga->swc);
277 }
278
279 svga->rebind.flags.texture_samplers = false;
280
281 return PIPE_OK;
282 }
283
284
/* Tracked-state atom: re-emits texture bindings whenever the framebuffer,
 * bound textures, polygon stipple, or sampler state changes.
 */
struct svga_tracked_state svga_hw_tss_binding = {
   "texture binding emit",
   SVGA_NEW_FRAME_BUFFER |
   SVGA_NEW_TEXTURE_BINDING |
   SVGA_NEW_STIPPLE |
   SVGA_NEW_SAMPLER,
   update_tss_binding
};
293
294
295
/**
 * Accumulates texture-stage-state (TSS) tokens so all changed values can
 * be emitted in a single SetTextureState command.
 */
struct ts_queue {
   unsigned ts_count;  /* number of valid entries in ts[] */
   SVGA3dTextureState ts[PIPE_MAX_SAMPLERS*SVGA3D_TS_MAX];
};
300
301
302 static inline void
svga_queue_tss(struct ts_queue * q,unsigned unit,unsigned tss,unsigned value)303 svga_queue_tss(struct ts_queue *q, unsigned unit, unsigned tss, unsigned value)
304 {
305 assert(q->ts_count < ARRAY_SIZE(q->ts));
306 q->ts[q->ts_count].stage = unit;
307 q->ts[q->ts_count].name = tss;
308 q->ts[q->ts_count].value = value;
309 q->ts_count++;
310 }
311
312
/**
 * Queue the TSS token SVGA3D_TS_<token> for 'unit' if 'val' differs from
 * the cached hardware value, and update the cache.  Expects a local
 * 'struct ts_queue *queue' to be in scope at the expansion site.
 */
#define EMIT_TS(svga, unit, val, token) \
do { \
   assert(unit < ARRAY_SIZE(svga->state.hw_draw.ts)); \
   STATIC_ASSERT(SVGA3D_TS_##token < ARRAY_SIZE(svga->state.hw_draw.ts[unit])); \
   if (svga->state.hw_draw.ts[unit][SVGA3D_TS_##token] != val) { \
      svga_queue_tss(queue, unit, SVGA3D_TS_##token, val); \
      svga->state.hw_draw.ts[unit][SVGA3D_TS_##token] = val; \
   } \
} while (0)

/**
 * Same as EMIT_TS, but for a float value: converts 'fvalue' to its raw
 * IEEE-754 bit pattern with fui() before comparing/queueing, since the
 * device protocol carries all TSS values as 32-bit integers.
 */
#define EMIT_TS_FLOAT(svga, unit, fvalue, token) \
do { \
   unsigned val = fui(fvalue); \
   assert(unit < ARRAY_SIZE(svga->state.hw_draw.ts)); \
   STATIC_ASSERT(SVGA3D_TS_##token < ARRAY_SIZE(svga->state.hw_draw.ts[unit])); \
   if (svga->state.hw_draw.ts[unit][SVGA3D_TS_##token] != val) { \
      svga_queue_tss(queue, unit, SVGA3D_TS_##token, val); \
      svga->state.hw_draw.ts[unit][SVGA3D_TS_##token] = val; \
   } \
} while (0)
333
334
335 /**
336 * Emit texture sampler state (tss) for one texture unit.
337 */
338 static void
emit_tss_unit(struct svga_context * svga,unsigned unit,const struct svga_sampler_state * state,struct ts_queue * queue)339 emit_tss_unit(struct svga_context *svga, unsigned unit,
340 const struct svga_sampler_state *state,
341 struct ts_queue *queue)
342 {
343 EMIT_TS(svga, unit, state->mipfilter, MIPFILTER);
344 EMIT_TS(svga, unit, state->min_lod, TEXTURE_MIPMAP_LEVEL);
345 EMIT_TS(svga, unit, state->magfilter, MAGFILTER);
346 EMIT_TS(svga, unit, state->minfilter, MINFILTER);
347 EMIT_TS(svga, unit, state->aniso_level, TEXTURE_ANISOTROPIC_LEVEL);
348 EMIT_TS_FLOAT(svga, unit, state->lod_bias, TEXTURE_LOD_BIAS);
349 EMIT_TS(svga, unit, state->addressu, ADDRESSU);
350 EMIT_TS(svga, unit, state->addressw, ADDRESSW);
351 EMIT_TS(svga, unit, state->bordercolor, BORDERCOLOR);
352 // TEXCOORDINDEX -- hopefully not needed
353
354 if (svga->curr.tex_flags.flag_1d & (1 << unit))
355 EMIT_TS(svga, unit, SVGA3D_TEX_ADDRESS_WRAP, ADDRESSV);
356 else
357 EMIT_TS(svga, unit, state->addressv, ADDRESSV);
358
359 if (svga->curr.tex_flags.flag_srgb & (1 << unit))
360 EMIT_TS_FLOAT(svga, unit, 2.2f, GAMMA);
361 else
362 EMIT_TS_FLOAT(svga, unit, 1.0f, GAMMA);
363 }
364
365 static enum pipe_error
update_tss(struct svga_context * svga,uint64_t dirty)366 update_tss(struct svga_context *svga, uint64_t dirty )
367 {
368 const enum pipe_shader_type shader = PIPE_SHADER_FRAGMENT;
369 unsigned i;
370 struct ts_queue queue;
371
372 assert(!svga_have_vgpu10(svga));
373
374 queue.ts_count = 0;
375 for (i = 0; i < svga->curr.num_samplers[shader]; i++) {
376 if (svga->curr.sampler[shader][i]) {
377 const struct svga_sampler_state *curr = svga->curr.sampler[shader][i];
378 emit_tss_unit(svga, i, curr, &queue);
379 }
380 }
381
382 /* polygon stipple sampler */
383 if (svga->curr.rast->templ.poly_stipple_enable) {
384 emit_tss_unit(svga,
385 svga_fs_variant(svga->state.hw_draw.fs)->pstipple_sampler_unit,
386 svga->polygon_stipple.sampler,
387 &queue);
388 }
389
390 if (queue.ts_count) {
391 SVGA3dTextureState *ts;
392
393 if (SVGA3D_BeginSetTextureState(svga->swc, &ts, queue.ts_count) != PIPE_OK)
394 goto fail;
395
396 memcpy(ts, queue.ts, queue.ts_count * sizeof queue.ts[0]);
397
398 SVGA_FIFOCommitAll(svga->swc);
399 }
400
401 return PIPE_OK;
402
403 fail:
404 /* XXX: need to poison cached hardware state on failure to ensure
405 * dirty state gets re-emitted. Fix this by re-instating partial
406 * FIFOCommit command and only updating cached hw state once the
407 * initial allocation has succeeded.
408 */
409 memset(svga->state.hw_draw.ts, 0xcd, sizeof(svga->state.hw_draw.ts));
410
411 return PIPE_ERROR_OUT_OF_MEMORY;
412 }
413
414
/* Tracked-state atom: re-emits per-unit sampler state whenever the
 * sampler, polygon stipple, or texture flags change.
 */
struct svga_tracked_state svga_hw_tss = {
   "texture state emit",
   (SVGA_NEW_SAMPLER |
    SVGA_NEW_STIPPLE |
    SVGA_NEW_TEXTURE_FLAGS),
   update_tss
};
422
423