/*
 * Copyright © 2022 Imagination Technologies Ltd.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <assert.h>
#include <fcntl.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <sys/mman.h>
#include <unistd.h>
#include <xf86drm.h>

#include "pvr_private.h"
#include "pvr_srv.h"
#include "pvr_srv_bo.h"
#include "pvr_srv_bridge.h"
#include "pvr_types.h"
#include "pvr_winsys_helper.h"
#include "util/u_atomic.h"
#include "util/bitscan.h"
#include "util/macros.h"
#include "util/u_math.h"
#include "vk_log.h"

/* Note: this function does not have an associated pvr_srv_free_display_pmr()
 * function; use pvr_srv_free_pmr() instead.
 */
static VkResult pvr_srv_alloc_display_pmr(struct pvr_srv_winsys *srv_ws,
                                          uint64_t size,
                                          uint64_t srv_flags,
                                          void **const pmr_out,
                                          uint32_t *const handle_out)
{
   uint64_t alignment_out;
   uint64_t size_out;
   VkResult result;
   uint32_t handle;
   int ret;
   int fd;

   result =
      pvr_winsys_helper_display_buffer_create(&srv_ws->base, size, &handle);
   if (result != VK_SUCCESS)
      return result;

   ret = drmPrimeHandleToFD(srv_ws->base.display_fd, handle, O_CLOEXEC, &fd);
   if (ret) {
      result = vk_error(NULL, VK_ERROR_OUT_OF_HOST_MEMORY);
      goto err_display_buffer_destroy;
   }

   result = pvr_srv_physmem_import_dmabuf(srv_ws->base.render_fd,
                                          fd,
                                          srv_flags,
                                          pmr_out,
                                          &size_out,
                                          &alignment_out);

   assert(size_out >= size);
   assert(alignment_out == srv_ws->base.page_size);

   /* Close the fd; it is no longer needed once the dma-buf has been imported. */
   close(fd);

   if (result != VK_SUCCESS)
      goto err_display_buffer_destroy;

   *handle_out = handle;

   return VK_SUCCESS;

err_display_buffer_destroy:
   pvr_winsys_helper_display_buffer_destroy(&srv_ws->base, handle);

   return result;
}

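/* Take a reference on the winsys buffer object. Each CPU mapping and each
 * device mapping holds its own reference in addition to the one taken at
 * creation time.
 */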
static void buffer_acquire(struct pvr_srv_winsys_bo *srv_bo)
{
   p_atomic_inc(&srv_bo->ref_count);
}

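/* Drop a reference on the winsys buffer object. When the last reference is
 * dropped the PMR is freed, the display buffer handle (if any) is destroyed
 * and the wrapper struct is released.
 */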
static void buffer_release(struct pvr_srv_winsys_bo *srv_bo)
{
   struct pvr_winsys *ws;

   /* If all references were dropped, the pmr can be freed and unlocked. */
   if (p_atomic_dec_return(&srv_bo->ref_count) != 0)
      return;

   ws = srv_bo->base.ws;
   pvr_srv_free_pmr(ws->render_fd, srv_bo->pmr);

   if (srv_bo->is_display_buffer)
      pvr_winsys_helper_display_buffer_destroy(ws, srv_bo->handle);

   vk_free(ws->alloc, srv_bo);
}

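/* Translate generic winsys buffer flags into the services memalloc flags used
 * by the PMR allocation and import bridge calls.
 */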
static uint64_t pvr_srv_get_alloc_flags(uint32_t ws_flags)
{
   /* TODO: For now we assume that buffers should always be accessible to the
    * kernel and that the PVR_WINSYS_BO_FLAG_CPU_ACCESS flag only applies to
    * userspace mappings. Check to see if there are any situations where we
    * wouldn't want this to be the case.
    */
   uint64_t srv_flags =
      PVR_SRV_MEMALLOCFLAG_GPU_READABLE | PVR_SRV_MEMALLOCFLAG_GPU_WRITEABLE |
      PVR_SRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE |
      PVR_SRV_MEMALLOCFLAG_CPU_UNCACHED_WC | PVR_SRV_MEMALLOCFLAG_ZERO_ON_ALLOC;

   if (ws_flags & PVR_WINSYS_BO_FLAG_CPU_ACCESS) {
      srv_flags |= PVR_SRV_MEMALLOCFLAG_CPU_READABLE |
                   PVR_SRV_MEMALLOCFLAG_CPU_WRITEABLE;
   }

   if (ws_flags & PVR_WINSYS_BO_FLAG_GPU_UNCACHED)
      srv_flags |= PVR_SRV_MEMALLOCFLAG_GPU_UNCACHED;
   else
      srv_flags |= PVR_SRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT;

   if (ws_flags & PVR_WINSYS_BO_FLAG_PM_FW_PROTECT)
      srv_flags |= PVR_SRV_MEMALLOCFLAG_DEVICE_FLAG(PM_FW_PROTECT);

   return srv_flags;
}

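/* Allocate a GPU buffer object. Display buffers are allocated through the
 * display device and imported into the render device as a dma-buf PMR; all
 * other buffers are allocated directly as a PMR. The size is page aligned
 * before allocation.
 */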
VkResult pvr_srv_winsys_buffer_create(struct pvr_winsys *ws,
                                      uint64_t size,
                                      uint64_t alignment,
                                      enum pvr_winsys_bo_type type,
                                      uint32_t ws_flags,
                                      struct pvr_winsys_bo **const bo_out)
{
   const uint64_t srv_flags = pvr_srv_get_alloc_flags(ws_flags);
   struct pvr_srv_winsys *srv_ws = to_pvr_srv_winsys(ws);
   struct pvr_srv_winsys_bo *srv_bo;
   VkResult result;

   assert(util_is_power_of_two_nonzero64(alignment));

   /* The kernel will page align the size; we do the same here so we have
    * access to all of the allocated memory.
    */
   alignment = MAX2(alignment, ws->page_size);
   size = ALIGN_POT(size, alignment);

   srv_bo = vk_zalloc(ws->alloc,
                      sizeof(*srv_bo),
                      8,
                      VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
   if (!srv_bo)
      return vk_error(NULL, VK_ERROR_OUT_OF_HOST_MEMORY);

   srv_bo->is_display_buffer = (type == PVR_WINSYS_BO_TYPE_DISPLAY);
   if (srv_bo->is_display_buffer) {
      result = pvr_srv_alloc_display_pmr(srv_ws,
                                         size,
                                         srv_flags &
                                            PVR_SRV_MEMALLOCFLAGS_PMRFLAGSMASK,
                                         &srv_bo->pmr,
                                         &srv_bo->handle);

      srv_bo->base.is_imported = true;
   } else {
      result =
         pvr_srv_alloc_pmr(ws->render_fd,
                           size,
                           size,
                           1,
                           1,
                           ws->log2_page_size,
                           (srv_flags & PVR_SRV_MEMALLOCFLAGS_PMRFLAGSMASK),
                           getpid(),
                           &srv_bo->pmr);
   }

   if (result != VK_SUCCESS)
      goto err_vk_free_srv_bo;

   srv_bo->base.size = size;
   srv_bo->base.ws = ws;
   srv_bo->flags = srv_flags;

   p_atomic_set(&srv_bo->ref_count, 1);

   *bo_out = &srv_bo->base;

   return VK_SUCCESS;

err_vk_free_srv_bo:
   vk_free(ws->alloc, srv_bo);

   return result;
}

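/* Import an external buffer from a dma-buf fd. The resulting buffer object
 * takes its size from the imported PMR and is marked as imported.
 */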
VkResult
pvr_srv_winsys_buffer_create_from_fd(struct pvr_winsys *ws,
                                     int fd,
                                     struct pvr_winsys_bo **const bo_out)
{
   /* FIXME: PVR_SRV_MEMALLOCFLAG_CPU_UNCACHED_WC should be changed to
    * PVR_SRV_MEMALLOCFLAG_CPU_CACHE_INCOHERENT, as a dma-buf is always mapped
    * as cacheable by the exporter. Flags are not passed to the exporter and it
    * doesn't really change the behavior, but they can be used for internal
    * checking so they should reflect the correct cacheability of the buffer.
    * Ref: pvr_GetMemoryFdPropertiesKHR
    * https://www.kernel.org/doc/html/latest/driver-api/dma-buf.html#c.dma_buf_ops
    */
   static const uint64_t srv_flags =
      PVR_SRV_MEMALLOCFLAG_CPU_READABLE | PVR_SRV_MEMALLOCFLAG_CPU_WRITEABLE |
      PVR_SRV_MEMALLOCFLAG_CPU_UNCACHED_WC | PVR_SRV_MEMALLOCFLAG_GPU_READABLE |
      PVR_SRV_MEMALLOCFLAG_GPU_WRITEABLE |
      PVR_SRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT;
   struct pvr_srv_winsys_bo *srv_bo;
   uint64_t alignment_out;
   uint64_t size_out;
   VkResult result;

   srv_bo = vk_zalloc(ws->alloc,
                      sizeof(*srv_bo),
                      8,
                      VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
   if (!srv_bo)
      return vk_error(NULL, VK_ERROR_OUT_OF_HOST_MEMORY);

   result = pvr_srv_physmem_import_dmabuf(ws->render_fd,
                                          fd,
                                          srv_flags,
                                          &srv_bo->pmr,
                                          &size_out,
                                          &alignment_out);
   if (result != VK_SUCCESS)
      goto err_vk_free_srv_bo;

   assert(alignment_out == ws->page_size);

   srv_bo->base.ws = ws;
   srv_bo->base.size = size_out;
   srv_bo->base.is_imported = true;
   srv_bo->flags = srv_flags;

   p_atomic_set(&srv_bo->ref_count, 1);

   *bo_out = &srv_bo->base;

   return VK_SUCCESS;

err_vk_free_srv_bo:
   vk_free(ws->alloc, srv_bo);

   return result;
}

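/* Drop the creation-time reference on the buffer. The underlying resources
 * are not released until any outstanding CPU and device mappings have also
 * been released (see buffer_release()).
 */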
void pvr_srv_winsys_buffer_destroy(struct pvr_winsys_bo *bo)
{
   struct pvr_srv_winsys_bo *srv_bo = to_pvr_srv_winsys_bo(bo);

   buffer_release(srv_bo);
}

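/* Export the buffer as a dma-buf fd. Display buffers are exported through the
 * display DRM device using the saved buffer handle; all other buffers are
 * exported from their PMR through the render device.
 */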
VkResult pvr_srv_winsys_buffer_get_fd(struct pvr_winsys_bo *bo,
                                      int *const fd_out)
{
   struct pvr_srv_winsys_bo *srv_bo = to_pvr_srv_winsys_bo(bo);
   struct pvr_winsys *ws = bo->ws;
   int ret;

   if (!srv_bo->is_display_buffer)
      return pvr_srv_physmem_export_dmabuf(ws->render_fd, srv_bo->pmr, fd_out);

   /* For display buffers, export using the saved buffer handle. */
   ret = drmPrimeHandleToFD(ws->display_fd, srv_bo->handle, O_CLOEXEC, fd_out);
   if (ret)
      return vk_error(NULL, VK_ERROR_OUT_OF_HOST_MEMORY);

   return VK_SUCCESS;
}

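/* Map the whole buffer into the CPU address space. The PMR handle is encoded
 * into the mmap offset (shifted by the page size) so the kernel knows which
 * PMR to map. The mapping holds a reference on the buffer until it is
 * unmapped.
 */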
VkResult pvr_srv_winsys_buffer_map(struct pvr_winsys_bo *bo)
{
   struct pvr_srv_winsys_bo *srv_bo = to_pvr_srv_winsys_bo(bo);
   struct pvr_winsys *ws = bo->ws;
   const int prot =
      (srv_bo->flags & PVR_SRV_MEMALLOCFLAG_CPU_WRITEABLE ? PROT_WRITE : 0) |
      (srv_bo->flags & PVR_SRV_MEMALLOCFLAG_CPU_READABLE ? PROT_READ : 0);
   VkResult result;

   /* The buffer must not already be mapped. */
   assert(!bo->map);

   /* Map the full PMR to CPU space. */
   result = pvr_mmap(bo->size,
                     prot,
                     MAP_SHARED,
                     ws->render_fd,
                     (off_t)srv_bo->pmr << ws->log2_page_size,
                     &bo->map);
   if (result != VK_SUCCESS) {
      bo->map = NULL;
      return result;
   }

   VG(VALGRIND_MALLOCLIKE_BLOCK(bo->map, bo->size, 0, true));

   buffer_acquire(srv_bo);

   return VK_SUCCESS;
}

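/* Remove the CPU mapping created by pvr_srv_winsys_buffer_map() and drop the
 * reference it held on the buffer.
 */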
void pvr_srv_winsys_buffer_unmap(struct pvr_winsys_bo *bo)
{
   struct pvr_srv_winsys_bo *srv_bo = to_pvr_srv_winsys_bo(bo);

   /* The buffer must have been mapped previously. */
   assert(bo->map);

   VG(VALGRIND_FREELIKE_BLOCK(bo->map, 0));

   /* Unmap the whole PMR from CPU space. */
   pvr_munmap(bo->map, bo->size);

   bo->map = NULL;

   buffer_release(srv_bo);
}

/* This function must be used to allocate from a heap carveout and must only be
 * used within the winsys code. This also means that whoever uses it must know
 * what they are doing.
 */
VkResult pvr_srv_heap_alloc_carveout(struct pvr_winsys_heap *heap,
                                     const pvr_dev_addr_t carveout_dev_addr,
                                     uint64_t size,
                                     uint64_t alignment,
                                     struct pvr_winsys_vma **const vma_out)
{
   struct pvr_srv_winsys_heap *srv_heap = to_pvr_srv_winsys_heap(heap);
   struct pvr_winsys *ws = heap->ws;
   struct pvr_srv_winsys_vma *srv_vma;
   VkResult result;

   assert(util_is_power_of_two_nonzero64(alignment));

   /* pvr_srv_winsys_buffer_create() page aligns the size. We must do the same
    * here to ensure enough heap space is allocated to be able to map the
    * buffer to the GPU.
    */
   alignment = MAX2(alignment, heap->ws->page_size);
   size = ALIGN_POT(size, alignment);

   srv_vma = vk_alloc(ws->alloc,
                      sizeof(*srv_vma),
                      8,
                      VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
   if (!srv_vma) {
      result = vk_error(NULL, VK_ERROR_OUT_OF_HOST_MEMORY);
      goto err_out;
   }

   /* Just check that the address is correct and aligned; locking is not
    * required as the user is responsible for providing a distinct address.
    */
   if (carveout_dev_addr.addr < heap->base_addr.addr ||
       carveout_dev_addr.addr + size >
          heap->base_addr.addr + heap->static_data_carveout_size ||
       carveout_dev_addr.addr & ((ws->page_size) - 1)) {
      result = vk_error(NULL, VK_ERROR_INITIALIZATION_FAILED);
      goto err_vk_free_srv_vma;
   }

   /* Reserve the virtual range in the MMU and create a mapping structure. */
   result = pvr_srv_int_reserve_addr(ws->render_fd,
                                     srv_heap->server_heap,
                                     carveout_dev_addr,
                                     size,
                                     &srv_vma->reservation);
   if (result != VK_SUCCESS)
      goto err_vk_free_srv_vma;

   srv_vma->base.dev_addr = carveout_dev_addr;
   srv_vma->base.bo = NULL;
   srv_vma->base.heap = heap;
   srv_vma->base.size = size;

   p_atomic_inc(&srv_heap->base.ref_count);

   *vma_out = &srv_vma->base;

   return VK_SUCCESS;

err_vk_free_srv_vma:
   vk_free(ws->alloc, srv_vma);

err_out:
   return result;
}

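/* Allocate device virtual address space from the heap and reserve the range
 * with the services MMU. No physical backing is attached until
 * pvr_srv_winsys_vma_map() is called on the returned vma.
 */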
VkResult pvr_srv_winsys_heap_alloc(struct pvr_winsys_heap *heap,
                                   uint64_t size,
                                   uint64_t alignment,
                                   struct pvr_winsys_vma **const vma_out)
{
   struct pvr_srv_winsys_heap *const srv_heap = to_pvr_srv_winsys_heap(heap);
   struct pvr_srv_winsys *const srv_ws = to_pvr_srv_winsys(heap->ws);
   struct pvr_srv_winsys_vma *srv_vma;
   VkResult result;

   srv_vma = vk_alloc(srv_ws->base.alloc,
                      sizeof(*srv_vma),
                      8,
                      VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
   if (!srv_vma) {
      result = vk_error(NULL, VK_ERROR_OUT_OF_HOST_MEMORY);
      goto err_out;
   }

   result = pvr_winsys_helper_heap_alloc(heap, size, alignment, &srv_vma->base);
   if (result != VK_SUCCESS)
      goto err_pvr_srv_free_vma;

   /* Reserve the virtual range in the MMU and create a mapping structure. */
   result = pvr_srv_int_reserve_addr(srv_ws->base.render_fd,
                                     srv_heap->server_heap,
                                     srv_vma->base.dev_addr,
                                     srv_vma->base.size,
                                     &srv_vma->reservation);
   if (result != VK_SUCCESS)
      goto err_pvr_srv_free_allocation;

   *vma_out = &srv_vma->base;

   return VK_SUCCESS;

err_pvr_srv_free_allocation:
   pvr_winsys_helper_heap_free(&srv_vma->base);

err_pvr_srv_free_vma:
   vk_free(srv_ws->base.alloc, srv_vma);

err_out:
   return result;
}

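/* Release a vma allocated with pvr_srv_winsys_heap_alloc() or
 * pvr_srv_heap_alloc_carveout(). The vma must not have a buffer mapped into
 * it when this is called.
 */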
void pvr_srv_winsys_heap_free(struct pvr_winsys_vma *vma)
{
   struct pvr_srv_winsys *srv_ws = to_pvr_srv_winsys(vma->heap->ws);
   struct pvr_srv_winsys_vma *srv_vma = to_pvr_srv_winsys_vma(vma);

   /* A vma with an existing device mapping should not be freed. */
   assert(!vma->bo);

   /* Remove the mapping handle and the underlying reservation. */
   pvr_srv_int_unreserve_addr(srv_ws->base.render_fd, srv_vma->reservation);

   /* Check if we are dealing with the carveout address range. */
   if (vma->dev_addr.addr <
       (vma->heap->base_addr.addr + vma->heap->static_data_carveout_size)) {
      /* For carveout addresses just decrement the reference count. */
      p_atomic_dec(&vma->heap->ref_count);
   } else {
      /* Free the allocated virtual space. */
      pvr_winsys_helper_heap_free(vma);
   }

   vk_free(srv_ws->base.alloc, srv_vma);
}

/* * We assume the vma has been allocated with extra space to accommodate the
 *   offset.
 * * The offset passed in is unchanged and can be used to calculate the extra
 *   size that needs to be mapped and the final device virtual address.
 */
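/* Worked example for the non-display path (illustrative only, assuming a
 * 4 KiB heap/device page size): mapping offset = 0x1234 and size = 0x2000
 * gives
 *   virt_offset       = 0x1234 & 0xfff         = 0x234
 *   aligned_virt_size = align(0x234 + 0x2000)  = 0x3000
 *   phys_page_offset  = (0x1234 - 0x234) >> 12 = 1
 *   phys_page_count   = 0x3000 >> 12           = 3
 * and the address returned to the caller is vma->dev_addr + 0x234.
 */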
VkResult pvr_srv_winsys_vma_map(struct pvr_winsys_vma *vma,
                                struct pvr_winsys_bo *bo,
                                uint64_t offset,
                                uint64_t size,
                                pvr_dev_addr_t *const dev_addr_out)
{
   struct pvr_srv_winsys_vma *srv_vma = to_pvr_srv_winsys_vma(vma);
   struct pvr_srv_winsys_bo *srv_bo = to_pvr_srv_winsys_bo(bo);
   struct pvr_srv_winsys *srv_ws = to_pvr_srv_winsys(bo->ws);
   const uint64_t srv_flags = srv_bo->flags &
                              PVR_SRV_MEMALLOCFLAGS_VIRTUAL_MASK;
   const uint32_t virt_offset = offset & (vma->heap->page_size - 1);
   const uint64_t aligned_virt_size =
      ALIGN_POT(virt_offset + size, vma->heap->page_size);
   VkResult result;

   /* The address should not be mapped already. */
   assert(!vma->bo);

   if (srv_bo->is_display_buffer) {
      struct pvr_srv_winsys_heap *srv_heap = to_pvr_srv_winsys_heap(vma->heap);

      /* In the case of display buffers, we only support mapping the whole
       * PMR.
       */
      if (offset != 0 || bo->size != ALIGN_POT(size, srv_ws->base.page_size) ||
          vma->size != bo->size) {
         return vk_error(NULL, VK_ERROR_MEMORY_MAP_FAILED);
      }

      /* Map the requested pmr. */
      result = pvr_srv_int_map_pmr(srv_ws->base.render_fd,
                                   srv_heap->server_heap,
                                   srv_vma->reservation,
                                   srv_bo->pmr,
                                   srv_flags,
                                   &srv_vma->mapping);

   } else {
      const uint32_t phys_page_offset = (offset - virt_offset) >>
                                        srv_ws->base.log2_page_size;
      const uint32_t phys_page_count = aligned_virt_size >>
                                       srv_ws->base.log2_page_size;

      /* Check if the bo and vma can accommodate the given size and offset. */
      if (ALIGN_POT(offset + size, vma->heap->page_size) > bo->size ||
          aligned_virt_size > vma->size) {
         return vk_error(NULL, VK_ERROR_MEMORY_MAP_FAILED);
      }

      /* Map the requested pages. */
      result = pvr_srv_int_map_pages(srv_ws->base.render_fd,
                                     srv_vma->reservation,
                                     srv_bo->pmr,
                                     phys_page_count,
                                     phys_page_offset,
                                     srv_flags,
                                     vma->dev_addr);
   }

   if (result != VK_SUCCESS)
      return result;

   buffer_acquire(srv_bo);

   vma->bo = bo;
   vma->bo_offset = offset;
   vma->mapped_size = aligned_virt_size;

   if (dev_addr_out)
      *dev_addr_out = PVR_DEV_ADDR_OFFSET(vma->dev_addr, virt_offset);

   return VK_SUCCESS;
}

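/* Remove the device mapping created by pvr_srv_winsys_vma_map() and drop the
 * reference that the mapping held on the buffer. The vma itself remains
 * allocated and can be reused or freed by the caller.
 */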
void pvr_srv_winsys_vma_unmap(struct pvr_winsys_vma *vma)
{
   struct pvr_srv_winsys *srv_ws = to_pvr_srv_winsys(vma->heap->ws);
   struct pvr_srv_winsys_vma *srv_vma = to_pvr_srv_winsys_vma(vma);
   struct pvr_srv_winsys_bo *srv_bo;

   /* The address should be mapped. */
   assert(vma->bo);

   srv_bo = to_pvr_srv_winsys_bo(vma->bo);

   if (srv_bo->is_display_buffer) {
      /* Unmap the requested pmr. */
      pvr_srv_int_unmap_pmr(srv_ws->base.render_fd, srv_vma->mapping);
   } else {
      /* Unmap the requested pages. */
      pvr_srv_int_unmap_pages(srv_ws->base.render_fd,
                              srv_vma->reservation,
                              vma->dev_addr,
                              vma->mapped_size >> srv_ws->base.log2_page_size);
   }

   buffer_release(srv_bo);

   vma->bo = NULL;
}