/*
 * Copyright (c) 2022-2024 Broadcom. All Rights Reserved.
 * The term “Broadcom” refers to Broadcom Inc.
 * and/or its subsidiaries.
 * SPDX-License-Identifier: MIT
 */

#include "pipe/p_defines.h"
#include "util/u_bitmask.h"
#include "util/format/u_format.h"
#include "util/u_inlines.h"
#include "util/u_math.h"
#include "util/u_memory.h"

#include "svga_context.h"
#include "svga_cmd.h"
#include "svga_debug.h"
#include "svga_resource_buffer.h"
#include "svga_resource_texture.h"
#include "svga_surface.h"
#include "svga_sampler_view.h"
#include "svga_format.h"


/**
 * Create a uav object for the specified shader buffer
 */
SVGA3dUAViewId
svga_create_uav_buffer(struct svga_context *svga,
                       const struct pipe_shader_buffer *buf,
                       SVGA3dSurfaceFormat format,
                       SVGA3dUABufferFlags bufFlag)
{
   SVGA3dUAViewDesc desc;
   unsigned uaViewId;

   assert(buf);

   /* Set up the UAV descriptor for this buffer range. */
   memset(&desc, 0, sizeof(desc));
   desc.buffer.firstElement = buf->buffer_offset / sizeof(uint32);
   desc.buffer.numElements = buf->buffer_size / sizeof(uint32);
   desc.buffer.flags = bufFlag;

   uaViewId = svga_create_uav(svga, &desc, format,
                              SVGA3D_RESOURCE_BUFFER,
                              svga_buffer_handle(svga, buf->buffer,
                                                 PIPE_BIND_SHADER_BUFFER));
   if (uaViewId == SVGA3D_INVALID_ID)
      return uaViewId;

   SVGA_DBG(DEBUG_UAV, "%s: resource=0x%x uaViewId=%d\n",
            __func__, buf->buffer, uaViewId);

   /* Mark this buffer as a uav bound buffer */
   struct svga_buffer *sbuf = svga_buffer(buf->buffer);
   sbuf->uav = true;

   return uaViewId;
}
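
/*
 * Illustrative sketch only (nothing in the driver calls this): create a raw
 * 32-bit UAV for a shader buffer slot and report failure when no UAV id is
 * available.  SVGA3D_R32_TYPELESS stands in for whatever typeless format the
 * state-update code actually picks, and the SVGA3dUABufferFlags value is
 * left to the caller, so both are assumptions of this example.
 */
static inline bool
example_create_ssbo_uav(struct svga_context *svga,
                        const struct pipe_shader_buffer *ssbo,
                        SVGA3dUABufferFlags flags,
                        SVGA3dUAViewId *out_id)
{
   /* svga_create_uav_buffer converts the byte offset/size into uint32
    * element counts and marks the underlying svga_buffer as UAV-bound.
    */
   SVGA3dUAViewId id =
      svga_create_uav_buffer(svga, ssbo, SVGA3D_R32_TYPELESS, flags);

   if (id == SVGA3D_INVALID_ID)
      return false;   /* no UAV id could be allocated for this buffer */

   *out_id = id;
   return true;
}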


/**
 * Set shader buffers.
 */
static void
svga_set_shader_buffers(struct pipe_context *pipe,
                        enum pipe_shader_type shader,
                        unsigned start, unsigned num,
                        const struct pipe_shader_buffer *buffers,
                        unsigned writeable_bitmask)
{
   struct svga_context *svga = svga_context(pipe);
   const struct pipe_shader_buffer *buf;

   assert(svga_have_gl43(svga));

   assert(start + num <= SVGA_MAX_SHADER_BUFFERS);

#if MESA_DEBUG
   const struct pipe_shader_buffer *b = buffers;
   SVGA_DBG(DEBUG_UAV, "%s: shader=%d start=%d num=%d ",
            __func__, shader, start, num);
   if (buffers) {
      for (unsigned i = 0; i < num; i++, b++) {
         SVGA_DBG(DEBUG_UAV, " 0x%x ", b);
      }
   }
   SVGA_DBG(DEBUG_UAV, "\n");
#endif

   buf = buffers;
   if (buffers) {
      int last_buffer = -1;
      for (unsigned i = start, j = 0; i < start + num; i++, buf++, j++) {
         struct svga_shader_buffer *cbuf = &svga->curr.shader_buffers[shader][i];

         if (buf && buf->buffer) {
            cbuf->desc = *buf;
            pipe_resource_reference(&cbuf->resource, buf->buffer);

            /* Mark the last bound shader buffer */
            last_buffer = i;
         }
         else {
            cbuf->desc.buffer = NULL;
            pipe_resource_reference(&cbuf->resource, NULL);
         }
         cbuf->uav_index = -1;
         cbuf->writeAccess = (writeable_bitmask & (1 << j)) != 0;
      }
      svga->curr.num_shader_buffers[shader] =
         MAX2(svga->curr.num_shader_buffers[shader], last_buffer + 1);
   }
   else {
      for (unsigned i = start; i < start + num; i++) {
         struct svga_shader_buffer *cbuf = &svga->curr.shader_buffers[shader][i];
         cbuf->desc.buffer = NULL;
         cbuf->uav_index = -1;
         pipe_resource_reference(&cbuf->resource, NULL);
      }
      if ((start + num) >= svga->curr.num_shader_buffers[shader])
         svga->curr.num_shader_buffers[shader] = start;
   }

#if MESA_DEBUG
   SVGA_DBG(DEBUG_UAV,
            "%s: current num_shader_buffers=%d start=%d num=%d buffers=",
            __func__, svga->curr.num_shader_buffers[shader],
            start, num);

   for (unsigned i = start; i < start + num; i++) {
      struct svga_shader_buffer *cbuf = &svga->curr.shader_buffers[shader][i];
      SVGA_DBG(DEBUG_UAV, " 0x%x ", cbuf->desc.buffer);
   }

   SVGA_DBG(DEBUG_UAV, "\n");
#endif

   /* purge any unused uav objects */
   svga_destroy_uav(svga);

   svga->dirty |= SVGA_NEW_SHADER_BUFFER;
}
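
/*
 * Illustrative sketch only (nothing in the driver calls this): what a
 * gallium frontend call into svga_set_shader_buffers above looks like.
 * Two SSBOs are bound to the compute stage and the writeable bitmask (0x2)
 * marks only slot 1 as writable, which is what allows the read-only slot to
 * be considered for the raw-buffer SRV path later on.  The resources are
 * placeholders supplied by the caller.
 */
static inline void
example_bind_two_ssbos(struct pipe_context *pipe,
                       struct pipe_resource *ro_buf,
                       struct pipe_resource *rw_buf)
{
   struct pipe_shader_buffer bufs[2];

   bufs[0].buffer = ro_buf;              /* read-only storage buffer */
   bufs[0].buffer_offset = 0;
   bufs[0].buffer_size = ro_buf->width0;

   bufs[1].buffer = rw_buf;              /* read-write storage buffer */
   bufs[1].buffer_offset = 0;
   bufs[1].buffer_size = rw_buf->width0;

   pipe->set_shader_buffers(pipe, PIPE_SHADER_COMPUTE,
                            0 /* start */, 2 /* num */,
                            bufs, 0x2 /* only slot 1 is writable */);
}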


/**
 * Set HW atomic buffers.
 */
static void
svga_set_hw_atomic_buffers(struct pipe_context *pipe,
                           unsigned start, unsigned num,
                           const struct pipe_shader_buffer *buffers)
{
   struct svga_context *svga = svga_context(pipe);
   const struct pipe_shader_buffer *buf = buffers;

   assert(svga_have_gl43(svga));

   assert(start + num <= SVGA_MAX_ATOMIC_BUFFERS);

#if MESA_DEBUG
   SVGA_DBG(DEBUG_UAV, "%s: start=%d num=%d \n", __func__, start, num);
#endif

   buf = buffers;
   if (buffers) {
      int last_buffer = -1;
      for (unsigned i = start; i < start + num; i++, buf++) {
         struct svga_shader_buffer *cbuf = &svga->curr.atomic_buffers[i];

         if (buf && buf->buffer) {
            cbuf->desc = *buf;
            pipe_resource_reference(&cbuf->resource, buf->buffer);

            last_buffer = i;

            /* Mark the buffer as a uav buffer so that a readback will
             * be done at each read transfer.  We can't rely on the
             * dirty bit because it is reset after each read, but the
             * uav buffer can be updated at each draw.
             */
            struct svga_buffer *sbuf = svga_buffer(cbuf->desc.buffer);
            sbuf->uav = true;
         }
         else {
            cbuf->desc.buffer = NULL;
            pipe_resource_reference(&cbuf->resource, NULL);
         }
         cbuf->uav_index = -1;
      }
      svga->curr.num_atomic_buffers = MAX2(svga->curr.num_atomic_buffers,
                                           last_buffer + 1);
   }
   else {
      for (unsigned i = start; i < start + num; i++) {
         struct svga_shader_buffer *cbuf = &svga->curr.atomic_buffers[i];
         cbuf->desc.buffer = NULL;
         cbuf->uav_index = -1;
         pipe_resource_reference(&cbuf->resource, NULL);
      }
      if ((start + num) >= svga->curr.num_atomic_buffers)
         svga->curr.num_atomic_buffers = start;
   }

#if MESA_DEBUG
   SVGA_DBG(DEBUG_UAV, "%s: current num_atomic_buffers=%d start=%d num=%d ",
            __func__, svga->curr.num_atomic_buffers,
            start, num);

   for (unsigned i = start; i < start + num; i++) {
      struct svga_shader_buffer *cbuf = &svga->curr.atomic_buffers[i];
      SVGA_DBG(DEBUG_UAV, " 0x%x ", cbuf->desc.buffer);
   }

   SVGA_DBG(DEBUG_UAV, "\n");
#endif

   /* purge any unused uav objects */
   svga_destroy_uav(svga);

   svga->dirty |= SVGA_NEW_SHADER_BUFFER;
}
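
/*
 * Illustrative sketch only (nothing in the driver calls this): binding a
 * single atomic counter buffer through the hook above.  The resource is a
 * placeholder; the gallium frontend normally fills this in from the GL
 * atomic-counter binding points.
 */
static inline void
example_bind_atomic_counter(struct pipe_context *pipe,
                            struct pipe_resource *counter_buf)
{
   struct pipe_shader_buffer buf;

   buf.buffer = counter_buf;
   buf.buffer_offset = 0;
   buf.buffer_size = counter_buf->width0;

   /* Slot 0.  svga_set_hw_atomic_buffers will mark the underlying
    * svga_buffer as a UAV so that read transfers force a readback.
    */
   pipe->set_hw_atomic_buffers(pipe, 0, 1, &buf);
}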


/**
 * Initialize the shader buffer gallium interface.
 */
void
svga_init_shader_buffer_functions(struct svga_context *svga)
{
   if (!svga_have_gl43(svga))
      return;

   svga->pipe.set_shader_buffers = svga_set_shader_buffers;
   svga->pipe.set_hw_atomic_buffers = svga_set_hw_atomic_buffers;

   /* Initialize shader buffers */
   for (unsigned shader = 0; shader < PIPE_SHADER_TYPES; ++shader) {
      struct svga_shader_buffer *hw_buf =
         &svga->state.hw_draw.shader_buffers[shader][0];
      struct svga_shader_buffer *cur_buf =
         &svga->curr.shader_buffers[shader][0];

      /* Initialize uaViewId to SVGA3D_INVALID_ID for current shader buffers
       * and shader buffers in hw state to avoid unintentional unbinding of
       * shader buffers with uaViewId 0.
       */
      for (unsigned i = 0; i < ARRAY_SIZE(svga->curr.shader_buffers[shader]);
           i++, hw_buf++, cur_buf++) {
         hw_buf->resource = NULL;
         hw_buf->uav_index = -1;
         cur_buf->desc.buffer = NULL;
         cur_buf->resource = NULL;
         cur_buf->uav_index = -1;
      }
   }
   memset(svga->state.hw_draw.num_shader_buffers, 0,
          sizeof(svga->state.hw_draw.num_shader_buffers));

   /* Initialize atomic buffers */

   /* Initialize uaViewId to SVGA3D_INVALID_ID for current atomic buffers
    * and atomic buffers in hw state to avoid unintentional unbinding of
    * shader buffer with uaViewId 0.
    */
   for (unsigned i = 0; i < ARRAY_SIZE(svga->state.hw_draw.atomic_buffers); i++) {
      svga->curr.atomic_buffers[i].resource = NULL;
      svga->curr.atomic_buffers[i].uav_index = -1;
   }
   svga->state.hw_draw.num_atomic_buffers = 0;
}


/**
 * Clean up shader buffer state.
 */
void
svga_cleanup_shader_buffer_state(struct svga_context *svga)
{
   if (!svga_have_gl43(svga))
      return;

   svga_destroy_uav(svga);
}


/**
 * Validate shader buffer resources to ensure any pending changes to the
 * buffers are emitted before they are referenced.
 * The helper function also rebinds the buffer resources if the rebind flag
 * is specified.
 */
enum pipe_error
svga_validate_shader_buffer_resources(struct svga_context *svga,
                                      unsigned count,
                                      struct svga_shader_buffer *bufs,
                                      bool rebind)
{
   assert(svga_have_gl43(svga));

   struct svga_winsys_surface *surf;
   enum pipe_error ret;
   unsigned i;

   for (i = 0; i < count; i++) {
      if (bufs[i].resource) {
         assert(bufs[i].resource == bufs[i].desc.buffer);

         struct svga_buffer *sbuf = svga_buffer(bufs[i].resource);
         surf = svga_buffer_handle(svga, bufs[i].desc.buffer,
                                   PIPE_BIND_SHADER_BUFFER);
         assert(surf);
         if (rebind) {
            ret = svga->swc->resource_rebind(svga->swc, surf, NULL,
                                             SVGA_RELOC_READ|SVGA_RELOC_WRITE);
            if (ret != PIPE_OK)
               return ret;
         }

         /* Mark buffer as RENDERED */
         svga_set_buffer_rendered_to(sbuf->bufsurf);
      }
   }

   return PIPE_OK;
}
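
/*
 * Illustrative sketch only (nothing in the driver calls this): how a
 * state-emit path might validate the shader buffers currently bound to one
 * stage.  The "need_rebind" flag is an assumption standing in for however
 * the caller tracks that the command buffer was flushed and relocations
 * must be re-emitted.
 */
static inline enum pipe_error
example_validate_stage_buffers(struct svga_context *svga,
                               enum pipe_shader_type shader,
                               bool need_rebind)
{
   return svga_validate_shader_buffer_resources(
             svga,
             svga->curr.num_shader_buffers[shader],
             svga->curr.shader_buffers[shader],
             need_rebind);
}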


/**
 * Returns TRUE if the shader buffer can be bound to an SRV as a raw buffer.
 * This is the case if the shader buffer is read-only and the surface already
 * has the RAW_BUFFER_VIEW bind flag set.
 */
bool
svga_shader_buffer_can_use_srv(struct svga_context *svga,
                               enum pipe_shader_type shader,
                               unsigned index,
                               struct svga_shader_buffer *buf)
{
   if (buf->resource) {
      struct svga_buffer *sbuf = svga_buffer(buf->resource);
      if (sbuf && !buf->writeAccess && svga_has_raw_buffer_view(sbuf)) {
         return true;
      }
   }
   return false;
}


#define SVGA_SSBO_SRV_START SVGA_MAX_CONST_BUFS

/**
 * Bind the shader buffer as an SRV raw buffer.
 */
enum pipe_error
svga_shader_buffer_bind_srv(struct svga_context *svga,
                            enum pipe_shader_type shader,
                            unsigned index,
                            struct svga_shader_buffer *buf)
{
   enum pipe_error ret;
   unsigned slot = index + SVGA_SSBO_SRV_START;

   svga->state.raw_shaderbufs[shader] |= (1 << index);
   ret = svga_emit_rawbuf(svga, slot, shader, buf->desc.buffer_offset,
                          buf->desc.buffer_size, buf->resource);
   if (ret == PIPE_OK)
      svga->state.hw_draw.enabled_raw_shaderbufs[shader] |= (1 << index);

   return ret;
}


/**
 * Unbind the shader buffer SRV.
 */
enum pipe_error
svga_shader_buffer_unbind_srv(struct svga_context *svga,
                              enum pipe_shader_type shader,
                              unsigned index,
                              struct svga_shader_buffer *buf)
{
   enum pipe_error ret = PIPE_OK;
   unsigned slot = index + SVGA_SSBO_SRV_START;

   if ((svga->state.hw_draw.enabled_raw_shaderbufs[shader] & (1 << index))
       != 0) {
      ret = svga_emit_rawbuf(svga, slot, shader, 0, 0, NULL);
      if (ret == PIPE_OK)
         svga->state.hw_draw.enabled_raw_shaderbufs[shader] &= ~(1 << index);
   }
   svga->state.raw_shaderbufs[shader] &= ~(1 << index);
   return ret;
}
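
/*
 * Illustrative sketch only (nothing in the driver calls this): the intended
 * division of labor between the three helpers above.  A read-only shader
 * buffer whose surface already has a raw buffer view is bound as an SRV in
 * the slot range starting at SVGA_SSBO_SRV_START; otherwise any stale raw
 * SRV for that slot is unbound and the buffer is expected to go through the
 * UAV path instead.
 */
static inline enum pipe_error
example_bind_buffer_srv_or_uav(struct svga_context *svga,
                               enum pipe_shader_type shader,
                               unsigned index,
                               struct svga_shader_buffer *buf)
{
   if (svga_shader_buffer_can_use_srv(svga, shader, index, buf))
      return svga_shader_buffer_bind_srv(svga, shader, index, buf);

   /* Not usable as an SRV: make sure no old raw-buffer SRV stays bound
    * for this slot.  The UAV path then handles the actual binding.
    */
   return svga_shader_buffer_unbind_srv(svga, shader, index, buf);
}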