/*
 * Copyright (c) 2009-2024 Broadcom. All Rights Reserved.
 * The term “Broadcom” refers to Broadcom Inc.
 * and/or its subsidiaries.
 * SPDX-License-Identifier: MIT
 */

/**
 * @file
 * SVGA buffer manager for DMA buffers.
 *
 * DMA buffers are used for pixel and vertex data upload/download to/from
 * the virtual SVGA hardware.
 *
 * This file implements a buffer manager for the pipebuffer library, so that
 * we can use pipebuffer's suballocation, fencing, and debugging facilities
 * with DMA buffers.
 *
 * @author Jose Fonseca <[email protected]>
 */
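
/*
 * Rough usage sketch (illustrative; field names assumed, see vmw_buffer.h
 * for the real vmw_buffer_desc layout):
 *
 *    struct pb_manager *mgr = vmw_dma_bufmgr_create(vws);
 *    struct vmw_buffer_desc desc;
 *
 *    memset(&desc, 0, sizeof(desc));
 *    desc.pb_desc.alignment = 4096;
 *    desc.pb_desc.usage = PB_USAGE_CPU_READ_WRITE;
 *    buf = mgr->create_buffer(mgr, size, &desc.pb_desc);
 */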


#include "svga_cmd.h"

#include "util/u_inlines.h"
#include "util/u_memory.h"
#include "pipebuffer/pb_buffer.h"
#include "pipebuffer/pb_bufmgr.h"

#include "svga_winsys.h"

#include "vmw_screen.h"
#include "vmw_buffer.h"

struct vmw_dma_bufmgr;


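/*
 * A DMA buffer: a pb_buffer backed by a kernel vmw_region, with a cached
 * CPU mapping and bookkeeping for nested maps.
 */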
struct vmw_dma_buffer
{
   struct pb_buffer base;

   struct vmw_dma_bufmgr *mgr;

   struct vmw_region *region;
   void *map;
   unsigned map_flags;
   unsigned map_count;
};


extern const struct pb_vtbl vmw_dma_buffer_vtbl;


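/*
 * Downcast a pb_buffer to the vmw_dma_buffer that contains it, asserting
 * on the vtbl to catch buffers that did not come from this manager.
 */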
static inline struct vmw_dma_buffer *
vmw_pb_to_dma_buffer(struct pb_buffer *buf)
{
   assert(buf);
   assert(buf->vtbl == &vmw_dma_buffer_vtbl);
   return container_of(buf, struct vmw_dma_buffer, base);
}


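/*
 * The DMA buffer manager: a pb_manager that backs each buffer with its
 * own kernel vmw_region.
 */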
struct vmw_dma_bufmgr
{
   struct pb_manager base;

   struct vmw_winsys_screen *vws;
};


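/*
 * Downcast a pb_manager to the vmw_dma_bufmgr that contains it.
 */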
static inline struct vmw_dma_bufmgr *
vmw_pb_to_dma_bufmgr(struct pb_manager *mgr)
{
   assert(mgr);

   /* Make sure our extra flags don't collide with pipebuffer's flags */
   STATIC_ASSERT((VMW_BUFFER_USAGE_SHARED & PB_USAGE_ALL) == 0);
   STATIC_ASSERT((VMW_BUFFER_USAGE_SYNC & PB_USAGE_ALL) == 0);

   return container_of(mgr, struct vmw_dma_bufmgr, base);
}


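/*
 * Release a DMA buffer: drop any cached CPU mapping, destroy the backing
 * kernel region, and free the wrapper.
 */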
static void
vmw_dma_buffer_destroy(void *winsys, struct pb_buffer *_buf)
{
   struct vmw_dma_buffer *buf = vmw_pb_to_dma_buffer(_buf);

   assert(buf->map_count == 0);
   if (buf->map) {
      assert(buf->mgr->vws->cache_maps);
      vmw_ioctl_region_unmap(buf->region);
   }

   vmw_ioctl_region_destroy(buf->region);

   FREE(buf);
}


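/*
 * Map a DMA buffer for CPU access. The kernel mapping is created lazily
 * and kept across map/unmap cycles; buffers created with
 * VMW_BUFFER_USAGE_SYNC additionally synchronize with the GPU unless the
 * caller asked for an unsynchronized map.
 */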
static void *
vmw_dma_buffer_map(struct pb_buffer *_buf,
                   enum pb_usage_flags flags,
                   void *flush_ctx)
{
   struct vmw_dma_buffer *buf = vmw_pb_to_dma_buffer(_buf);
   int ret;

   if (!buf->map)
      buf->map = vmw_ioctl_region_map(buf->region);

   if (!buf->map)
      return NULL;

   if ((_buf->base.usage & VMW_BUFFER_USAGE_SYNC) &&
       !(flags & PB_USAGE_UNSYNCHRONIZED)) {
      ret = vmw_ioctl_syncforcpu(buf->region,
                                 !!(flags & PB_USAGE_DONTBLOCK),
                                 !(flags & PB_USAGE_CPU_WRITE),
                                 false);
      if (ret)
         return NULL;
   }

   /* Remember the flags so the matching unmap can undo the CPU sync. */
   buf->map_flags = flags;
   buf->map_count++;
   return buf->map;
}


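/*
 * Unmap a DMA buffer. The CPU synchronization taken in the map path is
 * released here; the kernel mapping itself is torn down only when the
 * last map goes away and the winsys does not cache maps.
 */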
static void
vmw_dma_buffer_unmap(struct pb_buffer *_buf)
{
   struct vmw_dma_buffer *buf = vmw_pb_to_dma_buffer(_buf);
   enum pb_usage_flags flags = buf->map_flags;

   if ((_buf->base.usage & VMW_BUFFER_USAGE_SYNC) &&
       !(flags & PB_USAGE_UNSYNCHRONIZED)) {
      vmw_ioctl_releasefromcpu(buf->region,
                               !(flags & PB_USAGE_CPU_WRITE),
                               false);
   }

   assert(buf->map_count > 0);
   if (!--buf->map_count && !buf->mgr->vws->cache_maps) {
      vmw_ioctl_region_unmap(buf->region);
      buf->map = NULL;
   }
}


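/*
 * DMA buffers are not suballocated at this level, so every buffer is its
 * own base buffer at offset zero.
 */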
static void
vmw_dma_buffer_get_base_buffer(struct pb_buffer *buf,
                               struct pb_buffer **base_buf,
                               pb_size *offset)
{
   *base_buf = buf;
   *offset = 0;
}


static enum pipe_error
vmw_dma_buffer_validate(struct pb_buffer *_buf,
                        struct pb_validate *vl,
                        enum pb_usage_flags flags)
{
   /* Always pinned */
   return PIPE_OK;
}


static void
vmw_dma_buffer_fence(struct pb_buffer *_buf,
                     struct pipe_fence_handle *fence)
{
   /* We don't need to do anything, as the pipebuffer library
    * will take care of delaying the destruction of fenced buffers */
}


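/* Buffer operations plugged into every DMA pb_buffer. */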
const struct pb_vtbl vmw_dma_buffer_vtbl = {
   .destroy = vmw_dma_buffer_destroy,
   .map = vmw_dma_buffer_map,
   .unmap = vmw_dma_buffer_unmap,
   .validate = vmw_dma_buffer_validate,
   .fence = vmw_dma_buffer_fence,
   .get_base_buffer = vmw_dma_buffer_get_base_buffer
};


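/*
 * Allocate a new DMA buffer. For shared buffers the caller provides an
 * existing kernel region in the descriptor; otherwise a fresh region of
 * the requested size is created.
 */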
static struct pb_buffer *
vmw_dma_bufmgr_create_buffer(struct pb_manager *_mgr,
                             pb_size size,
                             const struct pb_desc *pb_desc)
{
   struct vmw_dma_bufmgr *mgr = vmw_pb_to_dma_bufmgr(_mgr);
   struct vmw_winsys_screen *vws = mgr->vws;
   struct vmw_dma_buffer *buf;
   const struct vmw_buffer_desc *desc =
      (const struct vmw_buffer_desc *) pb_desc;

   buf = CALLOC_STRUCT(vmw_dma_buffer);
   if (!buf)
      goto error1;

   pipe_reference_init(&buf->base.base.reference, 1);
   buf->base.base.alignment_log2 = util_logbase2(pb_desc->alignment);
   buf->base.base.usage = pb_desc->usage & ~VMW_BUFFER_USAGE_SHARED;
   buf->base.vtbl = &vmw_dma_buffer_vtbl;
   buf->mgr = mgr;
   buf->base.base.size = size;
   if ((pb_desc->usage & VMW_BUFFER_USAGE_SHARED) && desc->region) {
      buf->region = desc->region;
   } else {
      buf->region = vmw_ioctl_region_create(vws, size);
      if (!buf->region)
         goto error2;
   }

   return &buf->base;
error2:
   FREE(buf);
error1:
   return NULL;
}


static void
vmw_dma_bufmgr_flush(struct pb_manager *mgr)
{
   /* No-op */
}


static void
vmw_dma_bufmgr_destroy(struct pb_manager *_mgr)
{
   struct vmw_dma_bufmgr *mgr = vmw_pb_to_dma_bufmgr(_mgr);
   FREE(mgr);
}


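/*
 * Create the DMA buffer manager for a winsys screen.
 */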
struct pb_manager *
vmw_dma_bufmgr_create(struct vmw_winsys_screen *vws)
{
   struct vmw_dma_bufmgr *mgr;

   mgr = CALLOC_STRUCT(vmw_dma_bufmgr);
   if (!mgr)
      return NULL;

   mgr->base.destroy = vmw_dma_bufmgr_destroy;
   mgr->base.create_buffer = vmw_dma_bufmgr_create_buffer;
   mgr->base.flush = vmw_dma_bufmgr_flush;

   mgr->vws = vws;

   return &mgr->base;
}


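/*
 * Resolve the SVGA guest pointer for a (possibly suballocated) buffer:
 * find the underlying DMA buffer and add the suballocation offset to the
 * region's base pointer.
 */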
bool
vmw_dma_bufmgr_region_ptr(struct pb_buffer *buf,
                          struct SVGAGuestPtr *ptr)
{
   struct pb_buffer *base_buf;
   pb_size offset = 0;
   struct vmw_dma_buffer *dma_buf;

   pb_get_base_buffer(buf, &base_buf, &offset);

   dma_buf = vmw_pb_to_dma_buffer(base_buf);
   if (!dma_buf)
      return false;

   *ptr = vmw_ioctl_region_ptr(dma_buf->region);

   ptr->offset += offset;

   return true;
}

#if MESA_DEBUG
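/*
 * In debug builds a buffer handed to the SVGA driver is a small wrapper
 * that pairs the pb_buffer with a debug_flush buffer, so map/unmap and
 * flush ordering can be verified.
 */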
struct svga_winsys_buffer {
   struct pb_buffer *pb_buf;
   struct debug_flush_buf *fbuf;
};

struct pb_buffer *
vmw_pb_buffer(struct svga_winsys_buffer *buffer)
{
   assert(buffer);
   return buffer->pb_buf;
}

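/*
 * Wrap a pb_buffer in a debug svga_winsys_buffer. Ownership of the
 * caller's reference transfers to the wrapper; on allocation failure the
 * reference is dropped and NULL is returned.
 */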
struct svga_winsys_buffer *
vmw_svga_winsys_buffer_wrap(struct pb_buffer *buffer)
{
   struct svga_winsys_buffer *buf;

   if (!buffer)
      return NULL;

   buf = CALLOC_STRUCT(svga_winsys_buffer);
   if (!buf) {
      pb_reference(&buffer, NULL);
      return NULL;
   }

   buf->pb_buf = buffer;
   buf->fbuf = debug_flush_buf_create(false, VMW_DEBUG_FLUSH_STACK);
   return buf;
}

struct debug_flush_buf *
vmw_debug_flush_buf(struct svga_winsys_buffer *buffer)
{
   return buffer->fbuf;
}

#endif

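/*
 * Drop the driver's reference on a buffer. In debug builds this also
 * releases the debug_flush wrapper allocated in
 * vmw_svga_winsys_buffer_wrap.
 */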
void
vmw_svga_winsys_buffer_destroy(struct svga_winsys_screen *sws,
                               struct svga_winsys_buffer *buf)
{
   struct pb_buffer *pbuf = vmw_pb_buffer(buf);
   (void)sws;
   pb_reference(&pbuf, NULL);
#if MESA_DEBUG
   debug_flush_buf_reference(&buf->fbuf, NULL);
   FREE(buf);
#endif
}

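/*
 * Map a buffer for the SVGA driver, translating gallium pipe_map_flags
 * into pipebuffer usage flags. An unsynchronized map never blocks, so
 * PIPE_MAP_DONTBLOCK is dropped in that case.
 */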
void *
vmw_svga_winsys_buffer_map(struct svga_winsys_screen *sws,
                           struct svga_winsys_buffer *buf,
                           enum pipe_map_flags flags)
{
   void *map;
   enum pb_usage_flags pb_flags = 0;

   (void)sws;
   if (flags & PIPE_MAP_UNSYNCHRONIZED)
      flags &= ~PIPE_MAP_DONTBLOCK;

   if (flags & PIPE_MAP_READ)
      pb_flags |= PB_USAGE_CPU_READ;
   if (flags & PIPE_MAP_WRITE)
      pb_flags |= PB_USAGE_CPU_WRITE;
   if (flags & PIPE_MAP_DIRECTLY)
      pb_flags |= PB_USAGE_GPU_READ;
   if (flags & PIPE_MAP_DONTBLOCK)
      pb_flags |= PB_USAGE_DONTBLOCK;
   if (flags & PIPE_MAP_UNSYNCHRONIZED)
      pb_flags |= PB_USAGE_UNSYNCHRONIZED;
   if (flags & PIPE_MAP_PERSISTENT)
      pb_flags |= PB_USAGE_PERSISTENT;

   map = pb_map(vmw_pb_buffer(buf), pb_flags, NULL);

#if MESA_DEBUG
   if (map != NULL)
      debug_flush_map(buf->fbuf, pb_flags);
#endif

   return map;
}


void
vmw_svga_winsys_buffer_unmap(struct svga_winsys_screen *sws,
                             struct svga_winsys_buffer *buf)
{
   (void)sws;

#if MESA_DEBUG
   debug_flush_unmap(buf->fbuf);
#endif

   pb_unmap(vmw_pb_buffer(buf));
}