/*
 * Copyright (c) 2008-2024 Broadcom. All Rights Reserved.
 * The term “Broadcom” refers to Broadcom Inc.
 * and/or its subsidiaries.
 * SPDX-License-Identifier: MIT
 */

#ifndef SVGA_BUFFER_H
#define SVGA_BUFFER_H


#include "util/compiler.h"
#include "pipe/p_state.h"
#include "util/u_transfer.h"

#include "svga_screen_cache.h"
#include "svga_screen.h"
#include "svga_cmd.h"
#include "svga_context.h"


/**
 * Maximum number of discontiguous ranges
 */
#define SVGA_BUFFER_MAX_RANGES 32


struct svga_context;
struct svga_winsys_buffer;
struct svga_winsys_surface;

struct svga_buffer_range
{
   unsigned start;
   unsigned end;
};

struct svga_3d_update_gb_image;

/**
 * This structure describes the bind flags and cache key associated
 * with the host surface.
 */
struct svga_buffer_surface
{
   struct list_head list;
   unsigned bind_flags;
   struct svga_host_surface_cache_key key;
   struct svga_winsys_surface *handle;
   enum svga_surface_state surface_state;
};

/**
 * SVGA pipe buffer.
 */
struct svga_buffer
{
   struct pipe_resource b;

   /** This is a superset of b.bind */
   unsigned bind_flags;

   /**
    * Regular (non DMA'able) memory.
    *
    * Used for user buffers, or for buffers which we know beforehand can
    * never be used by the virtual hardware directly, such as constant buffers.
    */
   void *swbuf;

   /**
    * Whether swbuf was created by the user or not.
    */
   bool user;

   /**
    * Whether swbuf is used for this buffer.
    */
   bool use_swbuf;

   /**
    * Creation key for the host surface handle.
    *
    * This structure describes all the host surface characteristics so that it
    * can be looked up in cache, since creating a host surface is often a slow
    * operation.
    */
   struct svga_host_surface_cache_key key;

   /**
    * Host surface handle.
    *
    * This is a platform-independent abstraction for the host SID. We create
    * it when trying to bind.
    *
    * Only set for non-user buffers.
    */
   struct svga_winsys_surface *handle;

   /**
    * List of surfaces created for this buffer resource to support
    * incompatible bind flags.
    */
   struct list_head surfaces;

   /* Current surface structure */
   struct svga_buffer_surface *bufsurf;

   /**
    * Information about ongoing and past map operations.
    */
   struct {
      /**
       * Number of concurrent mappings.
       */
      unsigned count;

      /**
       * Dirty ranges.
       *
       * Ranges that were touched by the application and need to be uploaded
       * to the host.  See the illustrative sketch after this struct for one
       * way such ranges can be accumulated.
       *
       * This information will be copied into dma.boxes when emitting the
       * SVGA3dCmdSurfaceDMA command.
       */
      struct svga_buffer_range ranges[SVGA_BUFFER_MAX_RANGES];
      unsigned num_ranges;
   } map;

   /**
    * Information about the uploaded version of user buffers.
    */
   struct {
      struct pipe_resource *buffer;

      /**
       * We combine multiple user buffers into the same hardware buffer. This
       * is the relative offset within that buffer.
       */
      unsigned offset;

      /**
       * Range of the user buffer that is uploaded in @buffer at @offset.
       */
      unsigned start;
      unsigned end;
   } uploaded;

   /**
    * DMA'able memory.
    *
    * A piece of GMR memory, with the same size as the buffer. It is created
    * when mapping the buffer, and will be used to upload vertex data to the
    * host.
    *
    * Only set for non-user buffers.
    */
   struct svga_winsys_buffer *hwbuf;

   /**
    * Information about pending DMA uploads.
    */
   struct {
      /**
       * Whether this buffer has an unfinished DMA upload command.
       *
       * If not set then the rest of the information is null.
       */
      bool pending;

      SVGA3dSurfaceDMAFlags flags;

      /**
       * Pointer to the DMA copy box *inside* the command buffer.
       */
      SVGA3dCopyBox *boxes;

      /**
       * Pointer to the sequence of update commands
       * *inside* the command buffer.
       */
      struct svga_3d_update_gb_image *updates;

      /**
       * Context that has the pending DMA to this buffer.
       */
      struct svga_context *svga;
   } dma;

   /**
    * Linked list head, used to gather all buffers with pending DMA uploads
    * on a context.  It is only valid if dma.pending (above) is set.
    */
   struct list_head head;

   unsigned size;  /**< Approximate size in bytes */

   bool dirty;  /**< Need to do a readback before mapping? */
   bool uav;    /**< Set if the buffer is bound to a UAV */

   /**
    * In some cases we try to keep the results of the translate_indices()
    * function from svga_draw_elements.c.
    */
   struct {
      enum mesa_prim orig_prim, new_prim;
      struct pipe_resource *buffer;
      unsigned index_size;
      unsigned offset;  /**< first index */
      unsigned count;   /**< num indices */
   } translated_indices;
};
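
/*
 * Illustrative sketch only: this is not the driver's actual range-tracking
 * code (that lives in svga_resource_buffer.c), and example_add_range() is a
 * hypothetical helper.  It just shows how map.ranges above could plausibly
 * be maintained: merge a new write into any overlapping range, and fall
 * back to growing an existing range once all SVGA_BUFFER_MAX_RANGES slots
 * are in use.  MIN2()/MAX2() are Mesa's usual min/max macros.
 *
 *    static void
 *    example_add_range(struct svga_buffer_range *ranges,
 *                      unsigned *num_ranges,
 *                      unsigned start, unsigned end)
 *    {
 *       for (unsigned i = 0; i < *num_ranges; i++) {
 *          if (start <= ranges[i].end && end >= ranges[i].start) {
 *             ranges[i].start = MIN2(ranges[i].start, start);
 *             ranges[i].end = MAX2(ranges[i].end, end);
 *             return;
 *          }
 *       }
 *       if (*num_ranges < SVGA_BUFFER_MAX_RANGES) {
 *          ranges[*num_ranges].start = start;
 *          ranges[*num_ranges].end = end;
 *          (*num_ranges)++;
 *       } else {
 *          ranges[0].start = MIN2(ranges[0].start, start);
 *          ranges[0].end = MAX2(ranges[0].end, end);
 *       }
 *    }
 */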


static inline struct svga_buffer *
svga_buffer(struct pipe_resource *resource)
{
   struct svga_buffer *buf = (struct svga_buffer *) resource;
   assert(buf == NULL || buf->b.target == PIPE_BUFFER);
   return buf;
}


/**
 * Returns TRUE for user buffers.  We may
 * decide to use an alternate upload path for these buffers.
 */
static inline bool
svga_buffer_is_user_buffer(struct pipe_resource *buffer)
{
   if (buffer) {
      return svga_buffer(buffer)->user;
   } else {
      return false;
   }
}

/**
 * Returns a pointer to a struct svga_winsys_screen given a
 * struct svga_buffer.
 */
static inline struct svga_winsys_screen *
svga_buffer_winsys_screen(struct svga_buffer *sbuf)
{
   return svga_screen(sbuf->b.screen)->sws;
}


/**
 * Returns whether a buffer has hardware storage that is
 * visible to the GPU.
 */
static inline bool
svga_buffer_has_hw_storage(struct svga_buffer *sbuf)
{
   if (svga_buffer_winsys_screen(sbuf)->have_gb_objects)
      return (sbuf->handle ? true : false);
   else
      return (sbuf->hwbuf ? true : false);
}

/**
 * Map the hardware storage of a buffer.
 * \param flags  bitmask of PIPE_MAP_* flags
 */
static inline void *
svga_buffer_hw_storage_map(struct svga_context *svga,
                           struct svga_buffer *sbuf,
                           unsigned flags, bool *retry)
{
   struct svga_winsys_screen *sws = svga_buffer_winsys_screen(sbuf);

   svga->hud.num_buffers_mapped++;

   if (sws->have_gb_objects) {
      struct svga_winsys_context *swc = svga->swc;
      bool rebind;
      void *map;

      if (swc->force_coherent) {
         flags |= PIPE_MAP_PERSISTENT | PIPE_MAP_COHERENT;
      }
      map = swc->surface_map(swc, sbuf->handle, flags, retry, &rebind);
      if (map && rebind) {
         enum pipe_error ret;

         ret = SVGA3D_BindGBSurface(swc, sbuf->handle);
         if (ret != PIPE_OK) {
            svga_context_flush(svga, NULL);
            ret = SVGA3D_BindGBSurface(swc, sbuf->handle);
            assert(ret == PIPE_OK);
         }
         svga_context_flush(svga, NULL);
      }
      return map;
   } else {
      *retry = false;
      return sws->buffer_map(sws, sbuf->hwbuf, flags);
   }
}

/**
 * Unmap the hardware storage of a buffer.
 */
static inline void
svga_buffer_hw_storage_unmap(struct svga_context *svga,
                             struct svga_buffer *sbuf)
{
   struct svga_winsys_screen *sws = svga_buffer_winsys_screen(sbuf);

   if (sws->have_gb_objects) {
      struct svga_winsys_context *swc = svga->swc;
      bool rebind;

      swc->surface_unmap(swc, sbuf->handle, &rebind);
      if (rebind) {
         SVGA_RETRY(svga, SVGA3D_BindGBSurface(swc, sbuf->handle));
      }
   } else
      sws->buffer_unmap(sws, sbuf->hwbuf);

   /* Mark the buffer surface as UPDATED */
   assert(sbuf->bufsurf);
   sbuf->bufsurf->surface_state = SVGA_SURFACE_STATE_UPDATED;
}
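
/*
 * Illustrative usage sketch only: example_upload() is a hypothetical helper,
 * not part of this header.  The real call sites are the transfer functions
 * in svga_resource_buffer.c.  The pattern is: map the hardware storage, copy
 * data in, unmap; if the map fails with *retry set, flush the context and
 * try once more.
 *
 *    static void
 *    example_upload(struct svga_context *svga, struct svga_buffer *sbuf,
 *                   const void *data, unsigned size)
 *    {
 *       bool retry;
 *       void *map = svga_buffer_hw_storage_map(svga, sbuf,
 *                                              PIPE_MAP_WRITE, &retry);
 *       if (!map && retry) {
 *          svga_context_flush(svga, NULL);
 *          map = svga_buffer_hw_storage_map(svga, sbuf,
 *                                           PIPE_MAP_WRITE, &retry);
 *       }
 *       if (map) {
 *          memcpy(map, data, size);
 *          svga_buffer_hw_storage_unmap(svga, sbuf);
 *       }
 *    }
 */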


static inline void
svga_set_buffer_rendered_to(struct svga_buffer_surface *bufsurf)
{
   bufsurf->surface_state = SVGA_SURFACE_STATE_RENDERED;
}


static inline bool
svga_was_buffer_rendered_to(const struct svga_buffer_surface *bufsurf)
{
   return (bufsurf->surface_state == SVGA_SURFACE_STATE_RENDERED);
}


static inline bool
svga_has_raw_buffer_view(struct svga_buffer *sbuf)
{
   return (sbuf->uav ||
           (sbuf->key.persistent &&
            (sbuf->key.flags & SVGA3D_SURFACE_BIND_RAW_VIEWS) != 0));
}


struct pipe_resource *
svga_user_buffer_create(struct pipe_screen *screen,
                        void *ptr,
                        unsigned bytes,
                        unsigned usage);

struct pipe_resource *
svga_buffer_create(struct pipe_screen *screen,
                   const struct pipe_resource *template);


/**
 * Get the host surface handle for this buffer.
 *
 * This will ensure the host surface is updated, issuing DMAs as needed.
 *
 * NOTE: This may insert new commands in the context, so it *must* be called
 * before reserving command buffer space. And, in order to insert commands
 * it may need to call svga_context_flush().
 */
struct svga_winsys_surface *
svga_buffer_handle(struct svga_context *svga,
                   struct pipe_resource *buf,
                   unsigned tobind_flags);
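
/*
 * Illustrative usage sketch only: example_get_vb_handle() is a hypothetical
 * wrapper, not part of this header.  It emphasizes the NOTE above: fetch the
 * handle (which may emit commands or flush) *before* reserving command-buffer
 * space for the draw that will use it.  tobind_flags is presumed to be the
 * PIPE_BIND_* style flags tracked in svga_buffer::bind_flags.
 *
 *    static struct svga_winsys_surface *
 *    example_get_vb_handle(struct svga_context *svga,
 *                          struct pipe_resource *vb)
 *    {
 *       return svga_buffer_handle(svga, vb, PIPE_BIND_VERTEX_BUFFER);
 *    }
 */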

void
svga_context_flush_buffers(struct svga_context *svga);

struct svga_winsys_buffer *
svga_winsys_buffer_create(struct svga_context *svga,
                          unsigned alignment,
                          unsigned usage,
                          unsigned size);

void
svga_buffer_transfer_flush_region(struct pipe_context *pipe,
                                  struct pipe_transfer *transfer,
                                  const struct pipe_box *box);

void
svga_resource_destroy(struct pipe_screen *screen,
                      struct pipe_resource *buf);

void *
svga_buffer_transfer_map(struct pipe_context *pipe,
                         struct pipe_resource *resource,
                         unsigned level,
                         unsigned usage,
                         const struct pipe_box *box,
                         struct pipe_transfer **ptransfer);

void
svga_buffer_transfer_unmap(struct pipe_context *pipe,
                           struct pipe_transfer *transfer);

#endif /* SVGA_BUFFER_H */