1 /*
2 * Copyright © 2022 Imagination Technologies Ltd.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a copy
5 * of this software and associated documentation files (the "Software"), to deal
6 * in the Software without restriction, including without limitation the rights
7 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
8 * copies of the Software, and to permit persons to whom the Software is
9 * furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 * SOFTWARE.
22 */
23
24 #include <assert.h>
25 #include <errno.h>
26 #include <stdbool.h>
27 #include <stdint.h>
28 #include <string.h>
29 #include <sys/ioctl.h>
30 #include <xf86drm.h>
31
32 #include "fw-api/pvr_rogue_fwif_shared.h"
33 #include "pvr_private.h"
34 #include "pvr_srv.h"
35 #include "pvr_srv_bridge.h"
36 #include "pvr_types.h"
37 #include "util/log.h"
38 #include "util/macros.h"
39 #include "vk_log.h"
40
/* Report a failed services bridge call as a Vulkan error.
 *
 * Expands to the result of vk_errorf() so it can be returned directly from
 * VkResult functions. Logs the bridge function name, the PVR_SRV_ERROR code
 * from the bridge return struct, and the current errno string.
 *
 * NOTE: reads errno — invoke before any call that may clobber it.
 */
#define vk_bridge_err(vk_err, bridge_func, bridge_ret) \
   vk_errorf(NULL,                                     \
             vk_err,                                   \
             "%s failed, PVR_SRV_ERROR: %d, Errno: %s",\
             bridge_func,                              \
             (bridge_ret).error,                       \
             strerror(errno))
48
pvr_srv_bridge_call(int fd,uint8_t bridge_id,uint32_t function_id,void * input,uint32_t input_buffer_size,void * output,uint32_t output_buffer_size)49 static int pvr_srv_bridge_call(int fd,
50 uint8_t bridge_id,
51 uint32_t function_id,
52 void *input,
53 uint32_t input_buffer_size,
54 void *output,
55 uint32_t output_buffer_size)
56 {
57 struct drm_srvkm_cmd cmd = {
58 .bridge_id = bridge_id,
59 .bridge_func_id = function_id,
60 .in_data_ptr = (uint64_t)(uintptr_t)input,
61 .out_data_ptr = (uint64_t)(uintptr_t)output,
62 .in_data_size = input_buffer_size,
63 .out_data_size = output_buffer_size,
64 };
65
66 int ret = drmIoctl(fd, DRM_IOCTL_SRVKM_CMD, &cmd);
67 if (unlikely(ret))
68 return ret;
69
70 VG(VALGRIND_MAKE_MEM_DEFINED(output, output_buffer_size));
71
72 return 0U;
73 }
74
pvr_srv_init_module(int fd,enum pvr_srvkm_module_type module)75 VkResult pvr_srv_init_module(int fd, enum pvr_srvkm_module_type module)
76 {
77 struct drm_srvkm_init_data init_data = { .init_module = module };
78
79 int ret = drmIoctl(fd, DRM_IOCTL_SRVKM_INIT, &init_data);
80 if (unlikely(ret)) {
81 return vk_errorf(NULL,
82 VK_ERROR_INITIALIZATION_FAILED,
83 "DRM_IOCTL_SRVKM_INIT failed, Errno: %s",
84 strerror(errno));
85 }
86
87 return VK_SUCCESS;
88 }
89
pvr_srv_set_timeline_sw_only(int sw_timeline_fd)90 VkResult pvr_srv_set_timeline_sw_only(int sw_timeline_fd)
91 {
92 int ret;
93
94 assert(sw_timeline_fd >= 0);
95
96 ret = drmIoctl(sw_timeline_fd, DRM_IOCTL_SRVKM_SYNC_FORCE_SW_ONLY_CMD, NULL);
97
98 if (unlikely(ret < 0)) {
99 return vk_errorf(
100 NULL,
101 VK_ERROR_OUT_OF_HOST_MEMORY,
102 "DRM_IOCTL_SRVKM_SYNC_FORCE_SW_ONLY_CMD failed, Errno: %s",
103 strerror(errno));
104 }
105
106 return VK_SUCCESS;
107 }
108
pvr_srv_create_sw_fence(int sw_timeline_fd,int * new_fence_fd,uint64_t * sync_pt_idx)109 VkResult pvr_srv_create_sw_fence(int sw_timeline_fd,
110 int *new_fence_fd,
111 uint64_t *sync_pt_idx)
112 {
113 struct drm_srvkm_sw_sync_create_fence_data data = { .name[0] = '\0' };
114 int ret;
115
116 assert(sw_timeline_fd >= 0);
117 assert(new_fence_fd != NULL);
118
119 ret =
120 drmIoctl(sw_timeline_fd, DRM_IOCTL_SRVKM_SW_SYNC_CREATE_FENCE_CMD, &data);
121
122 if (unlikely(ret < 0)) {
123 return vk_errorf(
124 NULL,
125 VK_ERROR_OUT_OF_HOST_MEMORY,
126 "DRM_IOCTL_SRVKM_SW_SYNC_CREATE_FENCE_CMD failed, Errno: %s",
127 strerror(errno));
128 }
129
130 *new_fence_fd = data.fence;
131 if (sync_pt_idx)
132 *sync_pt_idx = data.sync_pt_idx;
133
134 return VK_SUCCESS;
135 }
136
pvr_srv_sw_sync_timeline_increment(int sw_timeline_fd,uint64_t * sync_pt_idx)137 VkResult pvr_srv_sw_sync_timeline_increment(int sw_timeline_fd,
138 uint64_t *sync_pt_idx)
139 {
140 struct drm_srvkm_sw_timeline_advance_data data = { 0 };
141 int ret;
142
143 assert(sw_timeline_fd >= 0);
144
145 ret = drmIoctl(sw_timeline_fd, DRM_IOCTL_SRVKM_SW_SYNC_INC_CMD, &data);
146
147 if (unlikely(ret < 0)) {
148 return vk_errorf(NULL,
149 VK_ERROR_OUT_OF_HOST_MEMORY,
150 "DRM_IOCTL_SRVKM_SW_SYNC_INC_CMD failed, Errno: %s",
151 strerror(errno));
152 }
153
154 if (sync_pt_idx)
155 *sync_pt_idx = data.sync_pt_idx;
156
157 return VK_SUCCESS;
158 }
159
pvr_srv_connection_create(int fd,uint64_t * const bvnc_out)160 VkResult pvr_srv_connection_create(int fd, uint64_t *const bvnc_out)
161 {
162 struct pvr_srv_bridge_connect_cmd cmd = {
163 .flags = PVR_SRV_FLAGS_CLIENT_64BIT_COMPAT,
164 .build_options = RGX_BUILD_OPTIONS,
165 .DDK_version = PVR_SRV_VERSION,
166 .DDK_build = PVR_SRV_VERSION_BUILD,
167 };
168
169 /* Initialize ret.error to a default error */
170 struct pvr_srv_bridge_connect_ret ret = {
171 .error = PVR_SRV_ERROR_BRIDGE_CALL_FAILED,
172 };
173
174 int result;
175
176 result = pvr_srv_bridge_call(fd,
177 PVR_SRV_BRIDGE_SRVCORE,
178 PVR_SRV_BRIDGE_SRVCORE_CONNECT,
179 &cmd,
180 sizeof(cmd),
181 &ret,
182 sizeof(ret));
183 if (result || ret.error != PVR_SRV_OK) {
184 return vk_bridge_err(VK_ERROR_INITIALIZATION_FAILED,
185 "PVR_SRV_BRIDGE_SRVCORE_CONNECT",
186 ret);
187 }
188
189 *bvnc_out = ret.bvnc;
190
191 return VK_SUCCESS;
192 }
193
pvr_srv_connection_destroy(int fd)194 void pvr_srv_connection_destroy(int fd)
195 {
196 /* Initialize ret.error to a default error */
197 struct pvr_srv_bridge_disconnect_ret ret = {
198 .error = PVR_SRV_ERROR_BRIDGE_CALL_FAILED,
199 };
200
201 int result;
202
203 result = pvr_srv_bridge_call(fd,
204 PVR_SRV_BRIDGE_SRVCORE,
205 PVR_SRV_BRIDGE_SRVCORE_DISCONNECT,
206 NULL,
207 0,
208 &ret,
209 sizeof(ret));
210 if (result || ret.error != PVR_SRV_OK) {
211 vk_bridge_err(VK_ERROR_UNKNOWN, "PVR_SRV_BRIDGE_SRVCORE_DISCONNECT", ret);
212 }
213 }
214
pvr_srv_get_multicore_info(int fd,uint32_t caps_size,uint64_t * caps,uint32_t * num_cores)215 VkResult pvr_srv_get_multicore_info(int fd,
216 uint32_t caps_size,
217 uint64_t *caps,
218 uint32_t *num_cores)
219 {
220 struct pvr_srv_bridge_getmulticoreinfo_cmd cmd = {
221 .caps = caps,
222 .caps_size = caps_size,
223 };
224
225 struct pvr_srv_bridge_getmulticoreinfo_ret ret = {
226 .caps = caps,
227 .error = PVR_SRV_ERROR_BRIDGE_CALL_FAILED,
228 };
229
230 int result;
231
232 result = pvr_srv_bridge_call(fd,
233 PVR_SRV_BRIDGE_SRVCORE,
234 PVR_SRV_BRIDGE_SRVCORE_GETMULTICOREINFO,
235 &cmd,
236 sizeof(cmd),
237 &ret,
238 sizeof(ret));
239 if (result || ret.error != PVR_SRV_OK) {
240 return vk_bridge_err(VK_ERROR_INITIALIZATION_FAILED,
241 "PVR_SRV_BRIDGE_SRVCORE_GETMULTICOREINFO",
242 ret);
243 }
244
245 if (!num_cores)
246 *num_cores = ret.num_cores;
247
248 return VK_SUCCESS;
249 }
250
pvr_srv_alloc_sync_primitive_block(int fd,void ** const handle_out,void ** const pmr_out,uint32_t * const size_out,uint32_t * const addr_out)251 VkResult pvr_srv_alloc_sync_primitive_block(int fd,
252 void **const handle_out,
253 void **const pmr_out,
254 uint32_t *const size_out,
255 uint32_t *const addr_out)
256 {
257 /* Initialize ret.error to a default error */
258 struct pvr_srv_bridge_alloc_sync_primitive_block_ret ret = {
259 .error = PVR_SRV_ERROR_BRIDGE_CALL_FAILED,
260 };
261
262 int result;
263
264 result = pvr_srv_bridge_call(fd,
265 PVR_SRV_BRIDGE_SYNC,
266 PVR_SRV_BRIDGE_SYNC_ALLOCSYNCPRIMITIVEBLOCK,
267 NULL,
268 0,
269 &ret,
270 sizeof(ret));
271 if (result || ret.error != PVR_SRV_OK) {
272 return vk_bridge_err(VK_ERROR_INITIALIZATION_FAILED,
273 "PVR_SRV_BRIDGE_SYNC_ALLOCSYNCPRIMITIVEBLOCK",
274 ret);
275 }
276
277 *handle_out = ret.handle;
278 *pmr_out = ret.pmr;
279 *size_out = ret.size;
280 *addr_out = ret.addr;
281
282 return VK_SUCCESS;
283 }
284
pvr_srv_free_sync_primitive_block(int fd,void * handle)285 void pvr_srv_free_sync_primitive_block(int fd, void *handle)
286 {
287 struct pvr_srv_bridge_free_sync_primitive_block_cmd cmd = {
288 .handle = handle,
289 };
290
291 /* Initialize ret.error to a default error */
292 struct pvr_srv_bridge_free_sync_primitive_block_ret ret = {
293 .error = PVR_SRV_ERROR_BRIDGE_CALL_FAILED,
294 };
295
296 int result;
297
298 result = pvr_srv_bridge_call(fd,
299 PVR_SRV_BRIDGE_SYNC,
300 PVR_SRV_BRIDGE_SYNC_FREESYNCPRIMITIVEBLOCK,
301 &cmd,
302 sizeof(cmd),
303 &ret,
304 sizeof(ret));
305 if (result || ret.error != PVR_SRV_OK) {
306 vk_bridge_err(VK_ERROR_UNKNOWN,
307 "PVR_SRV_BRIDGE_SYNC_FREESYNCPRIMITIVEBLOCK",
308 ret);
309 }
310 }
311
312 VkResult
pvr_srv_set_sync_primitive(int fd,void * handle,uint32_t index,uint32_t value)313 pvr_srv_set_sync_primitive(int fd, void *handle, uint32_t index, uint32_t value)
314 {
315 struct pvr_srv_bridge_sync_prim_set_cmd cmd = {
316 .handle = handle,
317 .index = index,
318 .value = value,
319 };
320
321 struct pvr_srv_bridge_sync_prim_set_ret ret = {
322 .error = PVR_SRV_ERROR_BRIDGE_CALL_FAILED,
323 };
324
325 int result;
326
327 result = pvr_srv_bridge_call(fd,
328 PVR_SRV_BRIDGE_SYNC,
329 PVR_SRV_BRIDGE_SYNC_SYNCPRIMSET,
330 &cmd,
331 sizeof(cmd),
332 &ret,
333 sizeof(ret));
334 if (result || ret.error != PVR_SRV_OK) {
335 return vk_bridge_err(VK_ERROR_UNKNOWN,
336 "PVR_SRV_BRIDGE_SYNC_SYNCPRIMSET",
337 ret);
338 }
339
340 return VK_SUCCESS;
341 }
342
pvr_srv_get_heap_count(int fd,uint32_t * const heap_count_out)343 VkResult pvr_srv_get_heap_count(int fd, uint32_t *const heap_count_out)
344 {
345 struct pvr_srv_heap_count_cmd cmd = {
346 .heap_config_index = 0,
347 };
348
349 struct pvr_srv_heap_count_ret ret = {
350 .error = PVR_SRV_ERROR_BRIDGE_CALL_FAILED,
351 };
352
353 int result;
354
355 result = pvr_srv_bridge_call(fd,
356 PVR_SRV_BRIDGE_MM,
357 PVR_SRV_BRIDGE_MM_HEAPCFGHEAPCOUNT,
358 &cmd,
359 sizeof(cmd),
360 &ret,
361 sizeof(ret));
362 if (result || ret.error != PVR_SRV_OK) {
363 return vk_bridge_err(VK_ERROR_INITIALIZATION_FAILED,
364 "PVR_SRV_BRIDGE_MM_HEAPCFGHEAPCOUNT",
365 ret);
366 }
367
368 *heap_count_out = ret.heap_count;
369
370 return VK_SUCCESS;
371 }
372
pvr_srv_int_heap_create(int fd,pvr_dev_addr_t base_address,uint64_t size,uint32_t log2_page_size,void * server_memctx,void ** const server_heap_out)373 VkResult pvr_srv_int_heap_create(int fd,
374 pvr_dev_addr_t base_address,
375 uint64_t size,
376 uint32_t log2_page_size,
377 void *server_memctx,
378 void **const server_heap_out)
379 {
380 struct pvr_srv_devmem_int_heap_create_cmd cmd = {
381 .server_memctx = server_memctx,
382 .base_addr = base_address,
383 .size = size,
384 .log2_page_size = log2_page_size,
385 };
386
387 struct pvr_srv_devmem_int_heap_create_ret ret = {
388 .error = PVR_SRV_ERROR_BRIDGE_CALL_FAILED,
389 };
390
391 int result;
392
393 result = pvr_srv_bridge_call(fd,
394 PVR_SRV_BRIDGE_MM,
395 PVR_SRV_BRIDGE_MM_DEVMEMINTHEAPCREATE,
396 &cmd,
397 sizeof(cmd),
398 &ret,
399 sizeof(ret));
400 if (result || ret.error != PVR_SRV_OK) {
401 return vk_bridge_err(VK_ERROR_INITIALIZATION_FAILED,
402 "PVR_SRV_BRIDGE_MM_DEVMEMINTHEAPCREATE",
403 ret);
404 }
405
406 *server_heap_out = ret.server_heap;
407
408 return VK_SUCCESS;
409 }
410
pvr_srv_int_heap_destroy(int fd,void * server_heap)411 void pvr_srv_int_heap_destroy(int fd, void *server_heap)
412 {
413 struct pvr_srv_devmem_int_heap_destroy_cmd cmd = {
414 .server_heap = server_heap,
415 };
416
417 struct pvr_srv_devmem_int_heap_destroy_ret ret = {
418 .error = PVR_SRV_ERROR_BRIDGE_CALL_FAILED,
419 };
420
421 int result;
422
423 result = pvr_srv_bridge_call(fd,
424 PVR_SRV_BRIDGE_MM,
425 PVR_SRV_BRIDGE_MM_DEVMEMINTHEAPDESTROY,
426 &cmd,
427 sizeof(cmd),
428 &ret,
429 sizeof(ret));
430 if (result || ret.error != PVR_SRV_OK) {
431 vk_bridge_err(VK_ERROR_INITIALIZATION_FAILED,
432 "PVR_SRV_BRIDGE_MM_DEVMEMINTHEAPDESTROY",
433 ret);
434 }
435 }
436
437 /* This bridge function allows to independently query heap name and heap
438 * details, i-e buffer/base_address/size/reserved_size/log2_page_size pointers
439 * are allowed to be NULL.
440 */
pvr_srv_get_heap_details(int fd,uint32_t heap_index,uint32_t buffer_size,char * const buffer_out,pvr_dev_addr_t * const base_address_out,uint64_t * const size_out,uint64_t * const reserved_size_out,uint32_t * const log2_page_size_out)441 VkResult pvr_srv_get_heap_details(int fd,
442 uint32_t heap_index,
443 uint32_t buffer_size,
444 char *const buffer_out,
445 pvr_dev_addr_t *const base_address_out,
446 uint64_t *const size_out,
447 uint64_t *const reserved_size_out,
448 uint32_t *const log2_page_size_out)
449 {
450 struct pvr_srv_heap_cfg_details_cmd cmd = {
451 .heap_config_index = 0,
452 .heap_index = heap_index,
453 .buffer_size = buffer_size,
454 .buffer = buffer_out,
455 };
456
457 struct pvr_srv_heap_cfg_details_ret ret = {
458 .error = PVR_SRV_ERROR_BRIDGE_CALL_FAILED,
459 .buffer = buffer_out,
460 };
461
462 int result;
463
464 result = pvr_srv_bridge_call(fd,
465 PVR_SRV_BRIDGE_MM,
466 PVR_SRV_BRIDGE_MM_HEAPCFGHEAPDETAILS,
467 &cmd,
468 sizeof(cmd),
469 &ret,
470 sizeof(ret));
471 if (result || ret.error != PVR_SRV_OK) {
472 return vk_bridge_err(VK_ERROR_INITIALIZATION_FAILED,
473 "PVR_SRV_BRIDGE_MM_HEAPCFGHEAPDETAILS",
474 ret);
475 }
476
477 VG(VALGRIND_MAKE_MEM_DEFINED(buffer_out, buffer_size));
478
479 if (base_address_out)
480 *base_address_out = ret.base_addr;
481
482 if (size_out)
483 *size_out = ret.size;
484
485 if (reserved_size_out)
486 *reserved_size_out = ret.reserved_size;
487
488 if (log2_page_size_out)
489 *log2_page_size_out = ret.log2_page_size;
490
491 return VK_SUCCESS;
492 }
493
pvr_srv_int_ctx_destroy(int fd,void * server_memctx)494 void pvr_srv_int_ctx_destroy(int fd, void *server_memctx)
495 {
496 struct pvr_srv_devmem_int_ctx_destroy_cmd cmd = {
497 .server_memctx = server_memctx,
498 };
499
500 struct pvr_srv_devmem_int_ctx_destroy_ret ret = {
501 .error = PVR_SRV_ERROR_BRIDGE_CALL_FAILED,
502 };
503
504 int result;
505
506 result = pvr_srv_bridge_call(fd,
507 PVR_SRV_BRIDGE_MM,
508 PVR_SRV_BRIDGE_MM_DEVMEMINTCTXDESTROY,
509 &cmd,
510 sizeof(cmd),
511 &ret,
512 sizeof(ret));
513 if (result || ret.error != PVR_SRV_OK) {
514 vk_bridge_err(VK_ERROR_INITIALIZATION_FAILED,
515 "PVR_SRV_BRIDGE_MM_DEVMEMINTCTXDESTROY",
516 ret);
517 }
518 }
519
pvr_srv_int_ctx_create(int fd,void ** const server_memctx_out,void ** const server_memctx_data_out)520 VkResult pvr_srv_int_ctx_create(int fd,
521 void **const server_memctx_out,
522 void **const server_memctx_data_out)
523 {
524 struct pvr_srv_devmem_int_ctx_create_cmd cmd = {
525 .kernel_memory_ctx = false,
526 };
527
528 struct pvr_srv_devmem_int_ctx_create_ret ret = {
529 .error = PVR_SRV_ERROR_BRIDGE_CALL_FAILED,
530 };
531
532 int result;
533
534 result = pvr_srv_bridge_call(fd,
535 PVR_SRV_BRIDGE_MM,
536 PVR_SRV_BRIDGE_MM_DEVMEMINTCTXCREATE,
537 &cmd,
538 sizeof(cmd),
539 &ret,
540 sizeof(ret));
541 if (result || ret.error != PVR_SRV_OK) {
542 return vk_bridge_err(VK_ERROR_INITIALIZATION_FAILED,
543 "PVR_SRV_BRIDGE_MM_DEVMEMINTCTXCREATE",
544 ret);
545 }
546
547 *server_memctx_out = ret.server_memctx;
548 *server_memctx_data_out = ret.server_memctx_data;
549
550 return VK_SUCCESS;
551 }
552
pvr_srv_int_reserve_addr(int fd,void * server_heap,pvr_dev_addr_t addr,uint64_t size,void ** const reservation_out)553 VkResult pvr_srv_int_reserve_addr(int fd,
554 void *server_heap,
555 pvr_dev_addr_t addr,
556 uint64_t size,
557 void **const reservation_out)
558 {
559 struct pvr_srv_devmem_int_reserve_range_cmd cmd = {
560 .server_heap = server_heap,
561 .addr = addr,
562 .size = size,
563 };
564
565 struct pvr_srv_devmem_int_reserve_range_ret ret = {
566 .error = PVR_SRV_ERROR_BRIDGE_CALL_FAILED,
567 };
568
569 int result;
570
571 result = pvr_srv_bridge_call(fd,
572 PVR_SRV_BRIDGE_MM,
573 PVR_SRV_BRIDGE_MM_DEVMEMINTRESERVERANGE,
574 &cmd,
575 sizeof(cmd),
576 &ret,
577 sizeof(ret));
578 if (result || ret.error != PVR_SRV_OK) {
579 return vk_bridge_err(VK_ERROR_INITIALIZATION_FAILED,
580 "PVR_SRV_BRIDGE_MM_DEVMEMINTRESERVERANGE",
581 ret);
582 }
583
584 *reservation_out = ret.reservation;
585
586 return VK_SUCCESS;
587 }
588
pvr_srv_int_unreserve_addr(int fd,void * reservation)589 void pvr_srv_int_unreserve_addr(int fd, void *reservation)
590 {
591 struct pvr_srv_bridge_in_devmem_int_unreserve_range_cmd cmd = {
592 .reservation = reservation,
593 };
594
595 struct pvr_srv_bridge_in_devmem_int_unreserve_range_ret ret = {
596 .error = PVR_SRV_ERROR_BRIDGE_CALL_FAILED,
597 };
598
599 int result;
600
601 result = pvr_srv_bridge_call(fd,
602 PVR_SRV_BRIDGE_MM,
603 PVR_SRV_BRIDGE_MM_DEVMEMINTUNRESERVERANGE,
604 &cmd,
605 sizeof(cmd),
606 &ret,
607 sizeof(ret));
608 if (result || ret.error != PVR_SRV_OK) {
609 vk_bridge_err(VK_ERROR_INITIALIZATION_FAILED,
610 "PVR_SRV_BRIDGE_MM_DEVMEMINTUNRESERVERANGE",
611 ret);
612 }
613 }
614
pvr_srv_alloc_pmr(int fd,uint64_t size,uint64_t block_size,uint32_t phy_blocks,uint32_t virt_blocks,uint32_t log2_page_size,uint64_t flags,uint32_t pid,void ** const pmr_out)615 VkResult pvr_srv_alloc_pmr(int fd,
616 uint64_t size,
617 uint64_t block_size,
618 uint32_t phy_blocks,
619 uint32_t virt_blocks,
620 uint32_t log2_page_size,
621 uint64_t flags,
622 uint32_t pid,
623 void **const pmr_out)
624 {
625 const char *annotation = "VK PHYSICAL ALLOCATION";
626 const uint32_t annotation_size =
627 strnlen(annotation, DEVMEM_ANNOTATION_MAX_LEN - 1) + 1;
628 uint32_t mapping_table = 0;
629
630 struct pvr_srv_physmem_new_ram_backed_locked_pmr_cmd cmd = {
631 .size = size,
632 .block_size = block_size,
633 .phy_blocks = phy_blocks,
634 .virt_blocks = virt_blocks,
635 .mapping_table = &mapping_table,
636 .log2_page_size = log2_page_size,
637 .flags = flags,
638 .annotation_size = annotation_size,
639 .annotation = annotation,
640 .pid = pid,
641 .pdump_flags = 0x00000000U,
642 };
643
644 struct pvr_srv_physmem_new_ram_backed_locked_pmr_ret ret = {
645 .error = PVR_SRV_ERROR_BRIDGE_CALL_FAILED,
646 };
647
648 int result;
649
650 result = pvr_srv_bridge_call(fd,
651 PVR_SRV_BRIDGE_MM,
652 PVR_SRV_BRIDGE_MM_PHYSMEMNEWRAMBACKEDLOCKEDPMR,
653 &cmd,
654 sizeof(cmd),
655 &ret,
656 sizeof(ret));
657 if (result || ret.error != PVR_SRV_OK) {
658 return vk_bridge_err(VK_ERROR_MEMORY_MAP_FAILED,
659 "PVR_SRV_BRIDGE_MM_PHYSMEMNEWRAMBACKEDLOCKEDPMR",
660 ret);
661 }
662
663 *pmr_out = ret.pmr;
664
665 return VK_SUCCESS;
666 }
667
pvr_srv_free_pmr(int fd,void * pmr)668 void pvr_srv_free_pmr(int fd, void *pmr)
669 {
670 struct pvr_srv_pmr_unref_unlock_pmr_cmd cmd = {
671 .pmr = pmr,
672 };
673
674 struct pvr_srv_pmr_unref_unlock_pmr_ret ret = {
675 .error = PVR_SRV_ERROR_BRIDGE_CALL_FAILED,
676 };
677
678 int result;
679
680 result = pvr_srv_bridge_call(fd,
681 PVR_SRV_BRIDGE_MM,
682 PVR_SRV_BRIDGE_MM_PMRUNREFUNLOCKPMR,
683 &cmd,
684 sizeof(cmd),
685 &ret,
686 sizeof(ret));
687 if (result || ret.error != PVR_SRV_OK) {
688 vk_bridge_err(VK_ERROR_UNKNOWN,
689 "PVR_SRV_BRIDGE_MM_PMRUNREFUNLOCKPMR",
690 ret);
691 }
692 }
693
pvr_srv_int_map_pages(int fd,void * reservation,void * pmr,uint32_t page_count,uint32_t page_offset,uint64_t flags,pvr_dev_addr_t addr)694 VkResult pvr_srv_int_map_pages(int fd,
695 void *reservation,
696 void *pmr,
697 uint32_t page_count,
698 uint32_t page_offset,
699 uint64_t flags,
700 pvr_dev_addr_t addr)
701 {
702 struct pvr_srv_devmem_int_map_pages_cmd cmd = {
703 .reservation = reservation,
704 .pmr = pmr,
705 .page_count = page_count,
706 .page_offset = page_offset,
707 .flags = flags,
708 .addr = addr,
709 };
710
711 struct pvr_srv_devmem_int_map_pages_ret ret = {
712 .error = PVR_SRV_ERROR_BRIDGE_CALL_FAILED,
713 };
714
715 int result;
716
717 result = pvr_srv_bridge_call(fd,
718 PVR_SRV_BRIDGE_MM,
719 PVR_SRV_BRIDGE_MM_DEVMEMINTMAPPAGES,
720 &cmd,
721 sizeof(cmd),
722 &ret,
723 sizeof(ret));
724 if (result || ret.error != PVR_SRV_OK) {
725 return vk_bridge_err(VK_ERROR_MEMORY_MAP_FAILED,
726 "PVR_SRV_BRIDGE_MM_DEVMEMINTMAPPAGES",
727 ret);
728 }
729
730 return VK_SUCCESS;
731 }
732
/* Unmap a range of pages from a device-virtual reservation.
 * Failures are logged but not propagated.
 */
void pvr_srv_int_unmap_pages(int fd,
                             void *reservation,
                             pvr_dev_addr_t dev_addr,
                             uint32_t page_count)
{
   /* Default the error so a failed ioctl still reports meaningfully. */
   struct pvr_srv_devmem_int_unmap_pages_ret ret = {
      .error = PVR_SRV_ERROR_BRIDGE_CALL_FAILED,
   };

   struct pvr_srv_devmem_int_unmap_pages_cmd cmd = {
      .reservation = reservation,
      .dev_addr = dev_addr,
      .page_count = page_count,
   };

   const int result =
      pvr_srv_bridge_call(fd,
                          PVR_SRV_BRIDGE_MM,
                          PVR_SRV_BRIDGE_MM_DEVMEMINTUNMAPPAGES,
                          &cmd,
                          sizeof(cmd),
                          &ret,
                          sizeof(ret));
   if (result != 0 || ret.error != PVR_SRV_OK) {
      vk_bridge_err(VK_ERROR_UNKNOWN,
                    "PVR_SRV_BRIDGE_MM_DEVMEMINTUNMAPPAGES",
                    ret);
   }
}
763
pvr_srv_int_map_pmr(int fd,void * server_heap,void * reservation,void * pmr,uint64_t flags,void ** const mapping_out)764 VkResult pvr_srv_int_map_pmr(int fd,
765 void *server_heap,
766 void *reservation,
767 void *pmr,
768 uint64_t flags,
769 void **const mapping_out)
770 {
771 struct pvr_srv_devmem_int_map_pmr_cmd cmd = {
772 .server_heap = server_heap,
773 .reservation = reservation,
774 .pmr = pmr,
775 .flags = flags,
776 };
777
778 struct pvr_srv_devmem_int_map_pmr_ret ret = {
779 .error = PVR_SRV_ERROR_BRIDGE_CALL_FAILED,
780 };
781
782 int result;
783
784 result = pvr_srv_bridge_call(fd,
785 PVR_SRV_BRIDGE_MM,
786 PVR_SRV_BRIDGE_MM_DEVMEMINTMAPPMR,
787 &cmd,
788 sizeof(cmd),
789 &ret,
790 sizeof(ret));
791 if (result || ret.error != PVR_SRV_OK) {
792 return vk_bridge_err(VK_ERROR_MEMORY_MAP_FAILED,
793 "PVR_SRV_BRIDGE_MM_DEVMEMINTMAPPMR",
794 ret);
795 }
796
797 *mapping_out = ret.mapping;
798
799 return VK_SUCCESS;
800 }
801
pvr_srv_int_unmap_pmr(int fd,void * mapping)802 void pvr_srv_int_unmap_pmr(int fd, void *mapping)
803 {
804 struct pvr_srv_devmem_int_unmap_pmr_cmd cmd = {
805 .mapping = mapping,
806 };
807
808 struct pvr_srv_devmem_int_unmap_pmr_ret ret = {
809 .error = PVR_SRV_ERROR_BRIDGE_CALL_FAILED,
810 };
811
812 int result;
813
814 result = pvr_srv_bridge_call(fd,
815 PVR_SRV_BRIDGE_MM,
816 PVR_SRV_BRIDGE_MM_DEVMEMINTUNMAPPMR,
817 &cmd,
818 sizeof(cmd),
819 &ret,
820 sizeof(ret));
821 if (result || ret.error != PVR_SRV_OK) {
822 vk_bridge_err(VK_ERROR_UNKNOWN,
823 "PVR_SRV_BRIDGE_MM_DEVMEMINTUNMAPPMR",
824 ret);
825 }
826 }
827
pvr_srv_physmem_import_dmabuf(int fd,int buffer_fd,uint64_t flags,void ** const pmr_out,uint64_t * const size_out,uint64_t * const align_out)828 VkResult pvr_srv_physmem_import_dmabuf(int fd,
829 int buffer_fd,
830 uint64_t flags,
831 void **const pmr_out,
832 uint64_t *const size_out,
833 uint64_t *const align_out)
834 {
835 struct pvr_srv_phys_mem_import_dmabuf_cmd cmd = {
836 .buffer_fd = buffer_fd,
837 .flags = flags,
838 .name_size = 0,
839 .name = NULL,
840 };
841
842 struct pvr_srv_phys_mem_import_dmabuf_ret ret = {
843 .error = PVR_SRV_ERROR_BRIDGE_CALL_FAILED,
844 };
845
846 int result;
847
848 result = pvr_srv_bridge_call(fd,
849 PVR_SRV_BRIDGE_DMABUF,
850 PVR_SRV_BRIDGE_DMABUF_PHYSMEMIMPORTDMABUF,
851 &cmd,
852 sizeof(cmd),
853 &ret,
854 sizeof(ret));
855 if (result || ret.error != PVR_SRV_OK) {
856 return vk_bridge_err(VK_ERROR_INVALID_EXTERNAL_HANDLE,
857 "PVR_SRV_BRIDGE_DMABUF_PHYSMEMIMPORTDMABUF",
858 ret);
859 }
860
861 *pmr_out = ret.pmr;
862 *size_out = ret.size;
863 *align_out = ret.align;
864
865 return VK_SUCCESS;
866 }
867
pvr_srv_physmem_export_dmabuf(int fd,void * pmr,int * const fd_out)868 VkResult pvr_srv_physmem_export_dmabuf(int fd, void *pmr, int *const fd_out)
869 {
870 struct pvr_srv_phys_mem_export_dmabuf_cmd cmd = {
871 .pmr = pmr,
872 };
873
874 struct pvr_srv_phys_mem_export_dmabuf_ret ret = {
875 .error = PVR_SRV_ERROR_BRIDGE_CALL_FAILED,
876 };
877
878 int result;
879
880 result = pvr_srv_bridge_call(fd,
881 PVR_SRV_BRIDGE_DMABUF,
882 PVR_SRV_BRIDGE_DMABUF_PHYSMEMEXPORTDMABUF,
883 &cmd,
884 sizeof(cmd),
885 &ret,
886 sizeof(ret));
887 if (result || ret.error != PVR_SRV_OK) {
888 return vk_bridge_err(VK_ERROR_OUT_OF_HOST_MEMORY,
889 "PVR_SRV_BRIDGE_DMABUF_PHYSMEMEXPORTDMABUF",
890 ret);
891 }
892
893 *fd_out = ret.fd;
894
895 return VK_SUCCESS;
896 }
897
pvr_srv_rgx_create_transfer_context(int fd,uint32_t priority,uint32_t reset_framework_cmd_size,uint8_t * reset_framework_cmd,void * priv_data,uint32_t packed_ccb_size_u8888,uint32_t context_flags,uint64_t robustness_address,void ** const cli_pmr_out,void ** const usc_pmr_out,void ** const transfer_context_out)898 VkResult pvr_srv_rgx_create_transfer_context(int fd,
899 uint32_t priority,
900 uint32_t reset_framework_cmd_size,
901 uint8_t *reset_framework_cmd,
902 void *priv_data,
903 uint32_t packed_ccb_size_u8888,
904 uint32_t context_flags,
905 uint64_t robustness_address,
906 void **const cli_pmr_out,
907 void **const usc_pmr_out,
908 void **const transfer_context_out)
909 {
910 struct pvr_srv_rgx_create_transfer_context_cmd cmd = {
911 .robustness_address = robustness_address,
912 .priority = priority,
913 .reset_framework_cmd_size = reset_framework_cmd_size,
914 .reset_framework_cmd = reset_framework_cmd,
915 .priv_data = priv_data,
916 .packed_ccb_size_u8888 = packed_ccb_size_u8888,
917 .context_flags = context_flags,
918 };
919
920 struct pvr_srv_rgx_create_transfer_context_ret ret = {
921 .error = PVR_SRV_ERROR_BRIDGE_CALL_FAILED,
922 };
923
924 int result;
925
926 result = pvr_srv_bridge_call(fd,
927 PVR_SRV_BRIDGE_RGXTQ,
928 PVR_SRV_BRIDGE_RGXTQ_RGXCREATETRANSFERCONTEXT,
929 &cmd,
930 sizeof(cmd),
931 &ret,
932 sizeof(ret));
933 if (result || ret.error != PVR_SRV_OK) {
934 return vk_bridge_err(VK_ERROR_INITIALIZATION_FAILED,
935 "PVR_SRV_BRIDGE_RGXTQ_RGXCREATETRANSFERCONTEXT",
936 ret);
937 }
938
939 if (cli_pmr_out)
940 *cli_pmr_out = ret.cli_pmr_mem;
941
942 if (usc_pmr_out)
943 *usc_pmr_out = ret.usc_pmr_mem;
944
945 *transfer_context_out = ret.transfer_context;
946
947 return VK_SUCCESS;
948 }
949
pvr_srv_rgx_destroy_transfer_context(int fd,void * transfer_context)950 void pvr_srv_rgx_destroy_transfer_context(int fd, void *transfer_context)
951 {
952 struct pvr_srv_rgx_destroy_transfer_context_cmd cmd = {
953 .transfer_context = transfer_context,
954 };
955
956 struct pvr_srv_rgx_destroy_transfer_context_ret ret = {
957 .error = PVR_SRV_ERROR_BRIDGE_CALL_FAILED,
958 };
959
960 int result;
961
962 result = pvr_srv_bridge_call(fd,
963 PVR_SRV_BRIDGE_RGXTQ,
964 PVR_SRV_BRIDGE_RGXTQ_RGXDESTROYTRANSFERCONTEXT,
965 &cmd,
966 sizeof(cmd),
967 &ret,
968 sizeof(ret));
969 if (result || ret.error != PVR_SRV_OK) {
970 vk_bridge_err(VK_ERROR_UNKNOWN,
971 "PVR_SRV_BRIDGE_RGXTQ_RGXDESTROYTRANSFERCONTEXT",
972 ret);
973 }
974 }
975
pvr_srv_rgx_submit_transfer2(int fd,void * transfer_context,uint32_t prepare_count,uint32_t * client_update_count,void *** update_ufo_sync_prim_block,uint32_t ** update_sync_offset,uint32_t ** update_value,int32_t check_fence,int32_t update_timeline_2d,int32_t update_timeline_3d,char * update_fence_name,uint32_t * cmd_size,uint8_t ** fw_command,uint32_t * tq_prepare_flags,uint32_t ext_job_ref,uint32_t sync_pmr_count,uint32_t * sync_pmr_flags,void ** sync_pmrs,int32_t * const update_fence_2d_out,int32_t * const update_fence_3d_out)976 VkResult pvr_srv_rgx_submit_transfer2(int fd,
977 void *transfer_context,
978 uint32_t prepare_count,
979 uint32_t *client_update_count,
980 void ***update_ufo_sync_prim_block,
981 uint32_t **update_sync_offset,
982 uint32_t **update_value,
983 int32_t check_fence,
984 int32_t update_timeline_2d,
985 int32_t update_timeline_3d,
986 char *update_fence_name,
987 uint32_t *cmd_size,
988 uint8_t **fw_command,
989 uint32_t *tq_prepare_flags,
990 uint32_t ext_job_ref,
991 uint32_t sync_pmr_count,
992 uint32_t *sync_pmr_flags,
993 void **sync_pmrs,
994 int32_t *const update_fence_2d_out,
995 int32_t *const update_fence_3d_out)
996 {
997 struct pvr_srv_rgx_submit_transfer2_cmd cmd = {
998 .transfer_context = transfer_context,
999 .client_update_count = client_update_count,
1000 .cmd_size = cmd_size,
1001 .sync_pmr_flags = sync_pmr_flags,
1002 .tq_prepare_flags = tq_prepare_flags,
1003 .update_sync_offset = update_sync_offset,
1004 .update_value = update_value,
1005 .fw_command = fw_command,
1006 .update_fence_name = update_fence_name,
1007 .sync_pmrs = sync_pmrs,
1008 .update_ufo_sync_prim_block = update_ufo_sync_prim_block,
1009 .update_timeline_2d = update_timeline_2d,
1010 .update_timeline_3d = update_timeline_3d,
1011 .check_fence = check_fence,
1012 .ext_job_ref = ext_job_ref,
1013 .prepare_count = prepare_count,
1014 .sync_pmr_count = sync_pmr_count,
1015 };
1016
1017 struct pvr_srv_rgx_submit_transfer2_ret ret = {
1018 .error = PVR_SRV_ERROR_BRIDGE_CALL_FAILED,
1019 };
1020
1021 int result;
1022
1023 result = pvr_srv_bridge_call(fd,
1024 PVR_SRV_BRIDGE_RGXTQ,
1025 PVR_SRV_BRIDGE_RGXTQ_RGXSUBMITTRANSFER2,
1026 &cmd,
1027 sizeof(cmd),
1028 &ret,
1029 sizeof(ret));
1030 if (result || ret.error != PVR_SRV_OK) {
1031 return vk_bridge_err(VK_ERROR_OUT_OF_DEVICE_MEMORY,
1032 "PVR_SRV_BRIDGE_RGXTQ_RGXSUBMITTRANSFER2",
1033 ret);
1034 }
1035
1036 if (update_fence_2d_out)
1037 *update_fence_2d_out = ret.update_fence_2d;
1038
1039 if (update_fence_3d_out)
1040 *update_fence_3d_out = ret.update_fence_3d;
1041
1042 return VK_SUCCESS;
1043 }
1044
1045 VkResult
pvr_srv_rgx_create_compute_context(int fd,uint32_t priority,uint32_t reset_framework_cmd_size,uint8_t * reset_framework_cmd,void * priv_data,uint32_t static_compute_context_state_size,uint8_t * static_compute_context_state,uint32_t packed_ccb_size,uint32_t context_flags,uint64_t robustness_address,uint32_t max_deadline_ms,void ** const compute_context_out)1046 pvr_srv_rgx_create_compute_context(int fd,
1047 uint32_t priority,
1048 uint32_t reset_framework_cmd_size,
1049 uint8_t *reset_framework_cmd,
1050 void *priv_data,
1051 uint32_t static_compute_context_state_size,
1052 uint8_t *static_compute_context_state,
1053 uint32_t packed_ccb_size,
1054 uint32_t context_flags,
1055 uint64_t robustness_address,
1056 uint32_t max_deadline_ms,
1057 void **const compute_context_out)
1058 {
1059 struct pvr_srv_rgx_create_compute_context_cmd cmd = {
1060 .priority = priority,
1061 .reset_framework_cmd_size = reset_framework_cmd_size,
1062 .reset_framework_cmd = reset_framework_cmd,
1063 .priv_data = priv_data,
1064 .static_compute_context_state_size = static_compute_context_state_size,
1065 .static_compute_context_state = static_compute_context_state,
1066 .packed_ccb_size = packed_ccb_size,
1067 .context_flags = context_flags,
1068 .robustness_address = robustness_address,
1069 .max_deadline_ms = max_deadline_ms,
1070 };
1071
1072 struct pvr_srv_rgx_create_compute_context_ret ret = {
1073 .error = PVR_SRV_ERROR_BRIDGE_CALL_FAILED,
1074 };
1075
1076 int result;
1077
1078 result = pvr_srv_bridge_call(fd,
1079 PVR_SRV_BRIDGE_RGXCMP,
1080 PVR_SRV_BRIDGE_RGXCMP_RGXCREATECOMPUTECONTEXT,
1081 &cmd,
1082 sizeof(cmd),
1083 &ret,
1084 sizeof(ret));
1085 if (result || ret.error != PVR_SRV_OK) {
1086 return vk_bridge_err(VK_ERROR_INITIALIZATION_FAILED,
1087 "PVR_SRV_BRIDGE_RGXCMP_RGXCREATECOMPUTECONTEXT",
1088 ret);
1089 }
1090
1091 *compute_context_out = ret.compute_context;
1092
1093 return VK_SUCCESS;
1094 }
1095
pvr_srv_rgx_destroy_compute_context(int fd,void * compute_context)1096 void pvr_srv_rgx_destroy_compute_context(int fd, void *compute_context)
1097 {
1098 struct pvr_srv_rgx_destroy_compute_context_cmd cmd = {
1099 .compute_context = compute_context,
1100 };
1101
1102 struct pvr_srv_rgx_destroy_compute_context_ret ret = {
1103 .error = PVR_SRV_ERROR_BRIDGE_CALL_FAILED,
1104 };
1105
1106 int result;
1107
1108 result = pvr_srv_bridge_call(fd,
1109 PVR_SRV_BRIDGE_RGXCMP,
1110 PVR_SRV_BRIDGE_RGXCMP_RGXDESTROYCOMPUTECONTEXT,
1111 &cmd,
1112 sizeof(cmd),
1113 &ret,
1114 sizeof(ret));
1115 if (result || ret.error != PVR_SRV_OK) {
1116 vk_bridge_err(VK_ERROR_UNKNOWN,
1117 "PVR_SRV_BRIDGE_RGXCMP_RGXDESTROYCOMPUTECONTEXT",
1118 ret);
1119 }
1120 }
1121
pvr_srv_rgx_kick_compute2(int fd,void * compute_context,uint32_t client_update_count,void ** client_update_ufo_sync_prim_block,uint32_t * client_update_offset,uint32_t * client_update_value,int32_t check_fence,int32_t update_timeline,uint32_t cmd_size,uint8_t * cdm_cmd,uint32_t ext_job_ref,uint32_t sync_pmr_count,uint32_t * sync_pmr_flags,void ** sync_pmrs,uint32_t num_work_groups,uint32_t num_work_items,uint32_t pdump_flags,uint64_t max_deadline_us,char * update_fence_name,int32_t * const update_fence_out)1122 VkResult pvr_srv_rgx_kick_compute2(int fd,
1123 void *compute_context,
1124 uint32_t client_update_count,
1125 void **client_update_ufo_sync_prim_block,
1126 uint32_t *client_update_offset,
1127 uint32_t *client_update_value,
1128 int32_t check_fence,
1129 int32_t update_timeline,
1130 uint32_t cmd_size,
1131 uint8_t *cdm_cmd,
1132 uint32_t ext_job_ref,
1133 uint32_t sync_pmr_count,
1134 uint32_t *sync_pmr_flags,
1135 void **sync_pmrs,
1136 uint32_t num_work_groups,
1137 uint32_t num_work_items,
1138 uint32_t pdump_flags,
1139 uint64_t max_deadline_us,
1140 char *update_fence_name,
1141 int32_t *const update_fence_out)
1142 {
1143 struct pvr_srv_rgx_kick_cdm2_cmd cmd = {
1144 .max_deadline_us = max_deadline_us,
1145 .compute_context = compute_context,
1146 .client_update_offset = client_update_offset,
1147 .client_update_value = client_update_value,
1148 .sync_pmr_flags = sync_pmr_flags,
1149 .cdm_cmd = cdm_cmd,
1150 .update_fence_name = update_fence_name,
1151 .client_update_ufo_sync_prim_block = client_update_ufo_sync_prim_block,
1152 .sync_pmrs = sync_pmrs,
1153 .check_fence = check_fence,
1154 .update_timeline = update_timeline,
1155 .client_update_count = client_update_count,
1156 .cmd_size = cmd_size,
1157 .ext_job_ref = ext_job_ref,
1158 .num_work_groups = num_work_groups,
1159 .num_work_items = num_work_items,
1160 .pdump_flags = pdump_flags,
1161 .sync_pmr_count = sync_pmr_count,
1162 };
1163
1164 struct pvr_srv_rgx_kick_cdm2_ret ret = {
1165 .error = PVR_SRV_ERROR_BRIDGE_CALL_FAILED,
1166 };
1167
1168 int result;
1169
1170 result = pvr_srv_bridge_call(fd,
1171 PVR_SRV_BRIDGE_RGXCMP,
1172 PVR_SRV_BRIDGE_RGXCMP_RGXKICKCDM2,
1173 &cmd,
1174 sizeof(cmd),
1175 &ret,
1176 sizeof(ret));
1177 if (result || ret.error != PVR_SRV_OK) {
1178 return vk_bridge_err(VK_ERROR_OUT_OF_DEVICE_MEMORY,
1179 "PVR_SRV_BRIDGE_RGXCMP_RGXKICKCDM2",
1180 ret);
1181 }
1182
1183 *update_fence_out = ret.update_fence;
1184
1185 return VK_SUCCESS;
1186 }
1187
1188 VkResult
pvr_srv_rgx_create_hwrt_dataset(int fd,uint64_t flipped_multi_sample_ctl,uint64_t multi_sample_ctl,const pvr_dev_addr_t * macrotile_array_dev_addrs,const pvr_dev_addr_t * pm_mlist_dev_addrs,const pvr_dev_addr_t * rtc_dev_addrs,const pvr_dev_addr_t * rgn_header_dev_addrs,const pvr_dev_addr_t * tail_ptrs_dev_addrs,const pvr_dev_addr_t * vheap_table_dev_adds,void ** free_lists,uint32_t isp_merge_lower_x,uint32_t isp_merge_lower_y,uint32_t isp_merge_scale_x,uint32_t isp_merge_scale_y,uint32_t isp_merge_upper_x,uint32_t isp_merge_upper_y,uint32_t isp_mtile_size,uint32_t mtile_stride,uint32_t ppp_screen,uint32_t rgn_header_size,uint32_t te_aa,uint32_t te_mtile1,uint32_t te_mtile2,uint32_t te_screen,uint32_t tpc_size,uint32_t tpc_stride,uint16_t max_rts,void ** hwrt_dataset_out)1189 pvr_srv_rgx_create_hwrt_dataset(int fd,
1190 uint64_t flipped_multi_sample_ctl,
1191 uint64_t multi_sample_ctl,
1192 const pvr_dev_addr_t *macrotile_array_dev_addrs,
1193 const pvr_dev_addr_t *pm_mlist_dev_addrs,
1194 const pvr_dev_addr_t *rtc_dev_addrs,
1195 const pvr_dev_addr_t *rgn_header_dev_addrs,
1196 const pvr_dev_addr_t *tail_ptrs_dev_addrs,
1197 const pvr_dev_addr_t *vheap_table_dev_adds,
1198 void **free_lists,
1199 uint32_t isp_merge_lower_x,
1200 uint32_t isp_merge_lower_y,
1201 uint32_t isp_merge_scale_x,
1202 uint32_t isp_merge_scale_y,
1203 uint32_t isp_merge_upper_x,
1204 uint32_t isp_merge_upper_y,
1205 uint32_t isp_mtile_size,
1206 uint32_t mtile_stride,
1207 uint32_t ppp_screen,
1208 uint32_t rgn_header_size,
1209 uint32_t te_aa,
1210 uint32_t te_mtile1,
1211 uint32_t te_mtile2,
1212 uint32_t te_screen,
1213 uint32_t tpc_size,
1214 uint32_t tpc_stride,
1215 uint16_t max_rts,
1216 void **hwrt_dataset_out)
1217 {
1218 /* Note that hwrt_dataset_out is passed in the cmd struct which the kernel
1219 * writes to. There's also a hwrt_dataset in the ret struct but we're not
1220 * going to use it since it's the same.
1221 */
1222 struct pvr_srv_rgx_create_hwrt_dataset_cmd cmd = {
1223 .flipped_multi_sample_ctl = flipped_multi_sample_ctl,
1224 .multi_sample_ctl = multi_sample_ctl,
1225 .macrotile_array_dev_addrs = macrotile_array_dev_addrs,
1226 .pm_mlist_dev_addrs = pm_mlist_dev_addrs,
1227 .rtc_dev_addrs = rtc_dev_addrs,
1228 .rgn_header_dev_addrs = rgn_header_dev_addrs,
1229 .tail_ptrs_dev_addrs = tail_ptrs_dev_addrs,
1230 .vheap_table_dev_adds = vheap_table_dev_adds,
1231 .hwrt_dataset = hwrt_dataset_out,
1232 .free_lists = free_lists,
1233 .isp_merge_lower_x = isp_merge_lower_x,
1234 .isp_merge_lower_y = isp_merge_lower_y,
1235 .isp_merge_scale_x = isp_merge_scale_x,
1236 .isp_merge_scale_y = isp_merge_scale_y,
1237 .isp_merge_upper_x = isp_merge_upper_x,
1238 .isp_merge_upper_y = isp_merge_upper_y,
1239 .isp_mtile_size = isp_mtile_size,
1240 .mtile_stride = mtile_stride,
1241 .ppp_screen = ppp_screen,
1242 .rgn_header_size = rgn_header_size,
1243 .te_aa = te_aa,
1244 .te_mtile1 = te_mtile1,
1245 .te_mtile2 = te_mtile2,
1246 .te_screen = te_screen,
1247 .tpc_size = tpc_size,
1248 .tpc_stride = tpc_stride,
1249 .max_rts = max_rts,
1250 };
1251
1252 struct pvr_srv_rgx_create_hwrt_dataset_ret ret = {
1253 .error = PVR_SRV_ERROR_BRIDGE_CALL_FAILED,
1254 };
1255
1256 int result;
1257
1258 result = pvr_srv_bridge_call(fd,
1259 PVR_SRV_BRIDGE_RGXTA3D,
1260 PVR_SRV_BRIDGE_RGXTA3D_RGXCREATEHWRTDATASET,
1261 &cmd,
1262 sizeof(cmd),
1263 &ret,
1264 sizeof(ret));
1265 if (result || ret.error != PVR_SRV_OK) {
1266 return vk_bridge_err(VK_ERROR_INITIALIZATION_FAILED,
1267 "PVR_SRV_BRIDGE_RGXTA3D_RGXCREATEHWRTDATASET",
1268 ret);
1269 }
1270
1271 VG(VALGRIND_MAKE_MEM_DEFINED(cmd.hwrt_dataset,
1272 sizeof(*cmd.hwrt_dataset) *
1273 ROGUE_FWIF_NUM_RTDATAS));
1274
1275 return VK_SUCCESS;
1276 }
1277
pvr_srv_rgx_destroy_hwrt_dataset(int fd,void * hwrt_dataset)1278 void pvr_srv_rgx_destroy_hwrt_dataset(int fd, void *hwrt_dataset)
1279 {
1280 struct pvr_srv_rgx_destroy_hwrt_dataset_cmd cmd = {
1281 .hwrt_dataset = hwrt_dataset,
1282 };
1283
1284 struct pvr_srv_rgx_destroy_hwrt_dataset_ret ret = {
1285 .error = PVR_SRV_ERROR_BRIDGE_CALL_FAILED,
1286 };
1287
1288 int result;
1289
1290 result = pvr_srv_bridge_call(fd,
1291 PVR_SRV_BRIDGE_RGXTA3D,
1292 PVR_SRV_BRIDGE_RGXTA3D_RGXDESTROYHWRTDATASET,
1293 &cmd,
1294 sizeof(cmd),
1295 &ret,
1296 sizeof(ret));
1297 if (result || ret.error != PVR_SRV_OK) {
1298 vk_bridge_err(VK_ERROR_INITIALIZATION_FAILED,
1299 "PVR_SRV_BRIDGE_RGXTA3D_RGXDESTROYHWRTDATASET",
1300 ret);
1301 }
1302 }
1303
pvr_srv_rgx_create_free_list(int fd,void * mem_ctx_priv_data,uint32_t max_free_list_pages,uint32_t init_free_list_pages,uint32_t grow_free_list_pages,uint32_t grow_param_threshold,void * global_free_list,enum pvr_srv_bool free_list_check,pvr_dev_addr_t free_list_dev_addr,void * free_list_pmr,uint64_t pmr_offset,void ** const cleanup_cookie_out)1304 VkResult pvr_srv_rgx_create_free_list(int fd,
1305 void *mem_ctx_priv_data,
1306 uint32_t max_free_list_pages,
1307 uint32_t init_free_list_pages,
1308 uint32_t grow_free_list_pages,
1309 uint32_t grow_param_threshold,
1310 void *global_free_list,
1311 enum pvr_srv_bool free_list_check,
1312 pvr_dev_addr_t free_list_dev_addr,
1313 void *free_list_pmr,
1314 uint64_t pmr_offset,
1315 void **const cleanup_cookie_out)
1316 {
1317 struct pvr_srv_rgx_create_free_list_cmd cmd = {
1318 .free_list_dev_addr = free_list_dev_addr,
1319 .pmr_offset = pmr_offset,
1320 .mem_ctx_priv_data = mem_ctx_priv_data,
1321 .free_list_pmr = free_list_pmr,
1322 .global_free_list = global_free_list,
1323 .free_list_check = free_list_check,
1324 .grow_free_list_pages = grow_free_list_pages,
1325 .grow_param_threshold = grow_param_threshold,
1326 .init_free_list_pages = init_free_list_pages,
1327 .max_free_list_pages = max_free_list_pages,
1328 };
1329
1330 struct pvr_srv_rgx_create_free_list_ret ret = {
1331 .error = PVR_SRV_ERROR_BRIDGE_CALL_FAILED,
1332 };
1333
1334 int result;
1335
1336 result = pvr_srv_bridge_call(fd,
1337 PVR_SRV_BRIDGE_RGXTA3D,
1338 PVR_SRV_BRIDGE_RGXTA3D_RGXCREATEFREELIST,
1339 &cmd,
1340 sizeof(cmd),
1341 &ret,
1342 sizeof(ret));
1343 if (result || ret.error != PVR_SRV_OK) {
1344 return vk_bridge_err(VK_ERROR_INITIALIZATION_FAILED,
1345 "PVR_SRV_BRIDGE_RGXTA3D_RGXCREATEFREELIST",
1346 ret);
1347 }
1348
1349 *cleanup_cookie_out = ret.cleanup_cookie;
1350
1351 return VK_SUCCESS;
1352 }
1353
pvr_srv_rgx_destroy_free_list(int fd,void * cleanup_cookie)1354 void pvr_srv_rgx_destroy_free_list(int fd, void *cleanup_cookie)
1355 {
1356 struct pvr_srv_rgx_destroy_free_list_cmd cmd = {
1357 .cleanup_cookie = cleanup_cookie,
1358 };
1359
1360 struct pvr_srv_rgx_destroy_free_list_ret ret = {
1361 .error = PVR_SRV_ERROR_BRIDGE_CALL_FAILED,
1362 };
1363
1364 int result;
1365
1366 /* FIXME: Do we want to propagate the retry error up the call chain so that
1367 * we can do something better than busy wait or is the expectation that we
1368 * should never get into this situation because the driver doesn't attempt
1369 * to free any resources while they're in use?
1370 */
1371 do {
1372 result = pvr_srv_bridge_call(fd,
1373 PVR_SRV_BRIDGE_RGXTA3D,
1374 PVR_SRV_BRIDGE_RGXTA3D_RGXDESTROYFREELIST,
1375 &cmd,
1376 sizeof(cmd),
1377 &ret,
1378 sizeof(ret));
1379 } while (result == PVR_SRV_ERROR_RETRY);
1380
1381 if (result || ret.error != PVR_SRV_OK) {
1382 vk_bridge_err(VK_ERROR_UNKNOWN,
1383 "PVR_SRV_BRIDGE_RGXTA3D_RGXDESTROYFREELIST",
1384 ret);
1385 }
1386 }
1387
1388 VkResult
pvr_srv_rgx_create_render_context(int fd,uint32_t priority,pvr_dev_addr_t vdm_callstack_addr,uint32_t call_stack_depth,uint32_t reset_framework_cmd_size,uint8_t * reset_framework_cmd,void * priv_data,uint32_t static_render_context_state_size,uint8_t * static_render_context_state,uint32_t packed_ccb_size,uint32_t context_flags,uint64_t robustness_address,uint32_t max_geom_deadline_ms,uint32_t max_frag_deadline_ms,void ** const render_context_out)1389 pvr_srv_rgx_create_render_context(int fd,
1390 uint32_t priority,
1391 pvr_dev_addr_t vdm_callstack_addr,
1392 uint32_t call_stack_depth,
1393 uint32_t reset_framework_cmd_size,
1394 uint8_t *reset_framework_cmd,
1395 void *priv_data,
1396 uint32_t static_render_context_state_size,
1397 uint8_t *static_render_context_state,
1398 uint32_t packed_ccb_size,
1399 uint32_t context_flags,
1400 uint64_t robustness_address,
1401 uint32_t max_geom_deadline_ms,
1402 uint32_t max_frag_deadline_ms,
1403 void **const render_context_out)
1404 {
1405 struct pvr_srv_rgx_create_render_context_cmd cmd = {
1406 .priority = priority,
1407 .vdm_callstack_addr = vdm_callstack_addr,
1408 .call_stack_depth = call_stack_depth,
1409 .reset_framework_cmd_size = reset_framework_cmd_size,
1410 .reset_framework_cmd = reset_framework_cmd,
1411 .priv_data = priv_data,
1412 .static_render_context_state_size = static_render_context_state_size,
1413 .static_render_context_state = static_render_context_state,
1414 .packed_ccb_size = packed_ccb_size,
1415 .context_flags = context_flags,
1416 .robustness_address = robustness_address,
1417 .max_ta_deadline_ms = max_geom_deadline_ms,
1418 .max_3d_deadline_ms = max_frag_deadline_ms,
1419 };
1420
1421 struct pvr_srv_rgx_create_render_context_ret ret = {
1422 .error = PVR_SRV_ERROR_BRIDGE_CALL_FAILED,
1423 };
1424
1425 int result;
1426
1427 result = pvr_srv_bridge_call(fd,
1428 PVR_SRV_BRIDGE_RGXTA3D,
1429 PVR_SRV_BRIDGE_RGXTA3D_RGXCREATERENDERCONTEXT,
1430 &cmd,
1431 sizeof(cmd),
1432 &ret,
1433 sizeof(ret));
1434 if (result || ret.error != PVR_SRV_OK) {
1435 return vk_bridge_err(VK_ERROR_INITIALIZATION_FAILED,
1436 "PVR_SRV_BRIDGE_RGXTA3D_RGXCREATERENDERCONTEXT",
1437 ret);
1438 }
1439
1440 *render_context_out = ret.render_context;
1441
1442 return VK_SUCCESS;
1443 }
1444
pvr_srv_rgx_destroy_render_context(int fd,void * render_context)1445 void pvr_srv_rgx_destroy_render_context(int fd, void *render_context)
1446 {
1447 struct pvr_srv_rgx_destroy_render_context_cmd cmd = {
1448 .render_context = render_context,
1449 };
1450
1451 struct pvr_srv_rgx_destroy_render_context_ret ret = {
1452 .error = PVR_SRV_ERROR_BRIDGE_CALL_FAILED,
1453 };
1454
1455 int result;
1456
1457 result = pvr_srv_bridge_call(fd,
1458 PVR_SRV_BRIDGE_RGXTA3D,
1459 PVR_SRV_BRIDGE_RGXTA3D_RGXDESTROYRENDERCONTEXT,
1460 &cmd,
1461 sizeof(cmd),
1462 &ret,
1463 sizeof(ret));
1464 if (result || ret.error != PVR_SRV_OK) {
1465 vk_bridge_err(VK_ERROR_UNKNOWN,
1466 "PVR_SRV_BRIDGE_RGXTA3D_RGXDESTORYRENDERCONTEXT",
1467 ret);
1468 }
1469 }
1470
pvr_srv_rgx_kick_render2(int fd,void * render_ctx,uint32_t client_geom_fence_count,void ** client_geom_fence_sync_prim_block,uint32_t * client_geom_fence_sync_offset,uint32_t * client_geom_fence_value,uint32_t client_geom_update_count,void ** client_geom_update_sync_prim_block,uint32_t * client_geom_update_sync_offset,uint32_t * client_geom_update_value,uint32_t client_frag_update_count,void ** client_frag_update_sync_prim_block,uint32_t * client_frag_update_sync_offset,uint32_t * client_frag_update_value,void * pr_fence_ufo_sync_prim_block,uint32_t client_pr_fence_ufo_sync_offset,uint32_t client_pr_fence_value,int32_t check_fence,int32_t update_timeline,int32_t * const update_fence_out,char * update_fence_name,int32_t check_fence_frag,int32_t update_timeline_frag,int32_t * const update_fence_frag_out,char * update_fence_name_frag,uint32_t cmd_geom_size,uint8_t * cmd_geom,uint32_t cmd_frag_pr_size,uint8_t * cmd_frag_pr,uint32_t cmd_frag_size,uint8_t * cmd_frag,uint32_t ext_job_ref,bool kick_geom,bool kick_pr,bool kick_frag,bool abort,uint32_t pdump_flags,void * hw_rt_dataset,void * zs_buffer,void * msaa_scratch_buffer,uint32_t sync_pmr_count,uint32_t * sync_pmr_flags,void ** sync_pmrs,uint32_t render_target_size,uint32_t num_draw_calls,uint32_t num_indices,uint32_t num_mrts,uint64_t deadline)1471 VkResult pvr_srv_rgx_kick_render2(int fd,
1472 void *render_ctx,
1473 uint32_t client_geom_fence_count,
1474 void **client_geom_fence_sync_prim_block,
1475 uint32_t *client_geom_fence_sync_offset,
1476 uint32_t *client_geom_fence_value,
1477 uint32_t client_geom_update_count,
1478 void **client_geom_update_sync_prim_block,
1479 uint32_t *client_geom_update_sync_offset,
1480 uint32_t *client_geom_update_value,
1481 uint32_t client_frag_update_count,
1482 void **client_frag_update_sync_prim_block,
1483 uint32_t *client_frag_update_sync_offset,
1484 uint32_t *client_frag_update_value,
1485 void *pr_fence_ufo_sync_prim_block,
1486 uint32_t client_pr_fence_ufo_sync_offset,
1487 uint32_t client_pr_fence_value,
1488 int32_t check_fence,
1489 int32_t update_timeline,
1490 int32_t *const update_fence_out,
1491 char *update_fence_name,
1492 int32_t check_fence_frag,
1493 int32_t update_timeline_frag,
1494 int32_t *const update_fence_frag_out,
1495 char *update_fence_name_frag,
1496 uint32_t cmd_geom_size,
1497 uint8_t *cmd_geom,
1498 uint32_t cmd_frag_pr_size,
1499 uint8_t *cmd_frag_pr,
1500 uint32_t cmd_frag_size,
1501 uint8_t *cmd_frag,
1502 uint32_t ext_job_ref,
1503 bool kick_geom,
1504 bool kick_pr,
1505 bool kick_frag,
1506 bool abort,
1507 uint32_t pdump_flags,
1508 void *hw_rt_dataset,
1509 void *zs_buffer,
1510 void *msaa_scratch_buffer,
1511 uint32_t sync_pmr_count,
1512 uint32_t *sync_pmr_flags,
1513 void **sync_pmrs,
1514 uint32_t render_target_size,
1515 uint32_t num_draw_calls,
1516 uint32_t num_indices,
1517 uint32_t num_mrts,
1518 uint64_t deadline)
1519 {
1520 struct pvr_srv_rgx_kick_ta3d2_cmd cmd = {
1521 .deadline = deadline,
1522 .hw_rt_dataset = hw_rt_dataset,
1523 .msaa_scratch_buffer = msaa_scratch_buffer,
1524 .pr_fence_ufo_sync_prim_block = pr_fence_ufo_sync_prim_block,
1525 .render_ctx = render_ctx,
1526 .zs_buffer = zs_buffer,
1527 .client_3d_update_sync_offset = client_frag_update_sync_offset,
1528 .client_3d_update_value = client_frag_update_value,
1529 .client_ta_fence_sync_offset = client_geom_fence_sync_offset,
1530 .client_ta_fence_value = client_geom_fence_value,
1531 .client_ta_update_sync_offset = client_geom_update_sync_offset,
1532 .client_ta_update_value = client_geom_update_value,
1533 .sync_pmr_flags = sync_pmr_flags,
1534 .cmd_3d = cmd_frag,
1535 .cmd_3d_pr = cmd_frag_pr,
1536 .cmd_ta = cmd_geom,
1537 .update_fence_name = update_fence_name,
1538 .update_fence_name_3d = update_fence_name_frag,
1539 .client_3d_update_sync_prim_block = client_frag_update_sync_prim_block,
1540 .client_ta_fence_sync_prim_block = client_geom_fence_sync_prim_block,
1541 .client_ta_update_sync_prim_block = client_geom_update_sync_prim_block,
1542 .sync_pmrs = sync_pmrs,
1543 .abort = abort,
1544 .kick_3d = kick_frag,
1545 .kick_pr = kick_pr,
1546 .kick_ta = kick_geom,
1547 .check_fence = check_fence,
1548 .check_fence_3d = check_fence_frag,
1549 .update_timeline = update_timeline,
1550 .update_timeline_3d = update_timeline_frag,
1551 .cmd_3d_size = cmd_frag_size,
1552 .cmd_3d_pr_size = cmd_frag_pr_size,
1553 .client_3d_update_count = client_frag_update_count,
1554 .client_ta_fence_count = client_geom_fence_count,
1555 .client_ta_update_count = client_geom_update_count,
1556 .ext_job_ref = ext_job_ref,
1557 .client_pr_fence_ufo_sync_offset = client_pr_fence_ufo_sync_offset,
1558 .client_pr_fence_value = client_pr_fence_value,
1559 .num_draw_calls = num_draw_calls,
1560 .num_indices = num_indices,
1561 .num_mrts = num_mrts,
1562 .pdump_flags = pdump_flags,
1563 .render_target_size = render_target_size,
1564 .sync_pmr_count = sync_pmr_count,
1565 .cmd_ta_size = cmd_geom_size,
1566 };
1567
1568 struct pvr_srv_rgx_kick_ta3d2_ret ret = {
1569 .error = PVR_SRV_ERROR_BRIDGE_CALL_FAILED,
1570 .update_fence = -1,
1571 .update_fence_3d = -1,
1572 };
1573
1574 int result;
1575
1576 result = pvr_srv_bridge_call(fd,
1577 PVR_SRV_BRIDGE_RGXTA3D,
1578 PVR_SRV_BRIDGE_RGXTA3D_RGXKICKTA3D2,
1579 &cmd,
1580 sizeof(cmd),
1581 &ret,
1582 sizeof(ret));
1583 if (result || ret.error != PVR_SRV_OK) {
1584 /* There is no 'retry' VkResult, so treat it as VK_NOT_READY instead. */
1585 if (result == PVR_SRV_ERROR_RETRY)
1586 return VK_NOT_READY;
1587
1588 return vk_bridge_err(VK_ERROR_OUT_OF_DEVICE_MEMORY,
1589 "PVR_SRV_BRIDGE_RGXTA3D_RGXKICKTA3D2",
1590 ret);
1591 }
1592
1593 *update_fence_out = ret.update_fence;
1594 *update_fence_frag_out = ret.update_fence_3d;
1595
1596 return VK_SUCCESS;
1597 }
1598