/*
 * Copyright 2019 Google LLC
 * SPDX-License-Identifier: MIT
 *
 * based in part on virgl which is:
 * Copyright 2014, 2015 Red Hat.
 */

#include <errno.h>
#include <netinet/in.h>
#include <poll.h>
#include <sys/mman.h>
#include <sys/socket.h>
#include <sys/types.h>
#include <sys/un.h>
#include <unistd.h>

#include "util/os_file.h"
#include "util/os_misc.h"
#include "util/sparse_array.h"
#include "util/u_process.h"
#define VIRGL_RENDERER_UNSTABLE_APIS
#include "virtio-gpu/virglrenderer_hw.h"
#include "vtest/vtest_protocol.h"

#include "vn_renderer_internal.h"

#define VTEST_PCI_VENDOR_ID 0x1af4
#define VTEST_PCI_DEVICE_ID 0x1050

struct vtest;

struct vtest_shmem {
   struct vn_renderer_shmem base;
};

struct vtest_bo {
   struct vn_renderer_bo base;

   uint32_t blob_flags;
   /* might be closed after mmap */
   int res_fd;
};

struct vtest_sync {
   struct vn_renderer_sync base;
};

struct vtest {
   struct vn_renderer base;

   struct vn_instance *instance;

   mtx_t sock_mutex;
   int sock_fd;

   uint32_t protocol_version;
   uint32_t max_timeline_count;

   struct {
      enum virgl_renderer_capset id;
      uint32_t version;
      struct virgl_renderer_capset_venus data;
   } capset;

   uint32_t shmem_blob_mem;

   struct util_sparse_array shmem_array;
   struct util_sparse_array bo_array;

   struct vn_renderer_shmem_cache shmem_cache;
};

static int
vtest_connect_socket(struct vn_instance *instance, const char *path)
{
   struct sockaddr_un un;
   int sock;

   sock = socket(AF_UNIX, SOCK_STREAM | SOCK_CLOEXEC, 0);
   if (sock < 0) {
      vn_log(instance, "failed to create a socket");
      return -1;
   }

   memset(&un, 0, sizeof(un));
   un.sun_family = AF_UNIX;
   memcpy(un.sun_path, path, strlen(path));

   if (connect(sock, (struct sockaddr *)&un, sizeof(un)) == -1) {
      vn_log(instance, "failed to connect to %s: %s", path, strerror(errno));
      close(sock);
      return -1;
   }

   return sock;
}

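/* Blocking helper: keep reading from the vtest socket until exactly @size
 * bytes have been received.  A read error is treated as a lost connection to
 * the rendering server and aborts the process.
 */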
static void
vtest_read(struct vtest *vtest, void *buf, size_t size)
{
   do {
      const ssize_t ret = read(vtest->sock_fd, buf, size);
      if (unlikely(ret < 0)) {
         vn_log(vtest->instance,
                "lost connection to rendering server on %zu read %zi %d",
                size, ret, errno);
         abort();
      }

      buf += ret;
      size -= ret;
   } while (size);
}

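/* Receive a single file descriptor over the vtest socket via SCM_RIGHTS
 * ancillary data.  The one-byte payload is only there to carry the message;
 * its content is ignored.
 */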
static int
vtest_receive_fd(struct vtest *vtest)
{
   char cmsg_buf[CMSG_SPACE(sizeof(int))];
   char dummy;
   struct msghdr msg = {
      .msg_iov =
         &(struct iovec){
            .iov_base = &dummy,
            .iov_len = sizeof(dummy),
         },
      .msg_iovlen = 1,
      .msg_control = cmsg_buf,
      .msg_controllen = sizeof(cmsg_buf),
   };

   if (recvmsg(vtest->sock_fd, &msg, 0) < 0) {
      vn_log(vtest->instance, "recvmsg failed: %s", strerror(errno));
      abort();
   }

   struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);
   if (!cmsg || cmsg->cmsg_level != SOL_SOCKET ||
       cmsg->cmsg_type != SCM_RIGHTS) {
      vn_log(vtest->instance, "invalid cmsghdr");
      abort();
   }

   return *((int *)CMSG_DATA(cmsg));
}

static void
vtest_write(struct vtest *vtest, const void *buf, size_t size)
{
   do {
      const ssize_t ret = write(vtest->sock_fd, buf, size);
      if (unlikely(ret < 0)) {
         vn_log(vtest->instance,
                "lost connection to rendering server on %zu write %zi %d",
                size, ret, errno);
         abort();
      }

      buf += ret;
      size -= ret;
   } while (size);
}

static void
vtest_vcmd_create_renderer(struct vtest *vtest, const char *name)
{
   const size_t size = strlen(name) + 1;

   uint32_t vtest_hdr[VTEST_HDR_SIZE];
   vtest_hdr[VTEST_CMD_LEN] = size;
   vtest_hdr[VTEST_CMD_ID] = VCMD_CREATE_RENDERER;

   vtest_write(vtest, vtest_hdr, sizeof(vtest_hdr));
   vtest_write(vtest, name, size);
}

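/* Probe whether the server understands VCMD_PING_PROTOCOL_VERSION.  Returns
 * true if the ping was answered, false if only the trailing dummy busy wait
 * came back.
 */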
static bool
vtest_vcmd_ping_protocol_version(struct vtest *vtest)
{
   uint32_t vtest_hdr[VTEST_HDR_SIZE];
   vtest_hdr[VTEST_CMD_LEN] = VCMD_PING_PROTOCOL_VERSION_SIZE;
   vtest_hdr[VTEST_CMD_ID] = VCMD_PING_PROTOCOL_VERSION;

   vtest_write(vtest, vtest_hdr, sizeof(vtest_hdr));

   /* send a dummy busy wait to avoid blocking in vtest_read in case ping
    * protocol version is not supported
    */
   uint32_t vcmd_busy_wait[VCMD_BUSY_WAIT_SIZE];
   vtest_hdr[VTEST_CMD_LEN] = VCMD_BUSY_WAIT_SIZE;
   vtest_hdr[VTEST_CMD_ID] = VCMD_RESOURCE_BUSY_WAIT;
   vcmd_busy_wait[VCMD_BUSY_WAIT_HANDLE] = 0;
   vcmd_busy_wait[VCMD_BUSY_WAIT_FLAGS] = 0;

   vtest_write(vtest, vtest_hdr, sizeof(vtest_hdr));
   vtest_write(vtest, vcmd_busy_wait, sizeof(vcmd_busy_wait));

   uint32_t dummy;
   vtest_read(vtest, vtest_hdr, sizeof(vtest_hdr));
   if (vtest_hdr[VTEST_CMD_ID] == VCMD_PING_PROTOCOL_VERSION) {
      /* consume the dummy busy wait result */
      vtest_read(vtest, vtest_hdr, sizeof(vtest_hdr));
      assert(vtest_hdr[VTEST_CMD_ID] == VCMD_RESOURCE_BUSY_WAIT);
      vtest_read(vtest, &dummy, sizeof(dummy));
      return true;
   } else {
      /* no ping protocol version support */
      assert(vtest_hdr[VTEST_CMD_ID] == VCMD_RESOURCE_BUSY_WAIT);
      vtest_read(vtest, &dummy, sizeof(dummy));
      return false;
   }
}

static uint32_t
vtest_vcmd_protocol_version(struct vtest *vtest)
{
   uint32_t vtest_hdr[VTEST_HDR_SIZE];
   uint32_t vcmd_protocol_version[VCMD_PROTOCOL_VERSION_SIZE];
   vtest_hdr[VTEST_CMD_LEN] = VCMD_PROTOCOL_VERSION_SIZE;
   vtest_hdr[VTEST_CMD_ID] = VCMD_PROTOCOL_VERSION;
   vcmd_protocol_version[VCMD_PROTOCOL_VERSION_VERSION] =
      VTEST_PROTOCOL_VERSION;

   vtest_write(vtest, vtest_hdr, sizeof(vtest_hdr));
   vtest_write(vtest, vcmd_protocol_version, sizeof(vcmd_protocol_version));

   vtest_read(vtest, vtest_hdr, sizeof(vtest_hdr));
   assert(vtest_hdr[VTEST_CMD_LEN] == VCMD_PROTOCOL_VERSION_SIZE);
   assert(vtest_hdr[VTEST_CMD_ID] == VCMD_PROTOCOL_VERSION);
   vtest_read(vtest, vcmd_protocol_version, sizeof(vcmd_protocol_version));

   return vcmd_protocol_version[VCMD_PROTOCOL_VERSION_VERSION];
}

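/* Query a server parameter.  The reply carries two dwords: a validity flag
 * followed by the value; zero is returned when the parameter is unknown to
 * the server.
 */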
static uint32_t
vtest_vcmd_get_param(struct vtest *vtest, enum vcmd_param param)
{
   uint32_t vtest_hdr[VTEST_HDR_SIZE];
   uint32_t vcmd_get_param[VCMD_GET_PARAM_SIZE];
   vtest_hdr[VTEST_CMD_LEN] = VCMD_GET_PARAM_SIZE;
   vtest_hdr[VTEST_CMD_ID] = VCMD_GET_PARAM;
   vcmd_get_param[VCMD_GET_PARAM_PARAM] = param;

   vtest_write(vtest, vtest_hdr, sizeof(vtest_hdr));
   vtest_write(vtest, vcmd_get_param, sizeof(vcmd_get_param));

   vtest_read(vtest, vtest_hdr, sizeof(vtest_hdr));
   assert(vtest_hdr[VTEST_CMD_LEN] == 2);
   assert(vtest_hdr[VTEST_CMD_ID] == VCMD_GET_PARAM);

   uint32_t resp[2];
   vtest_read(vtest, resp, sizeof(resp));

   return resp[0] ? resp[1] : 0;
}

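/* Fetch a capset from the server.  The reply starts with a validity dword
 * followed by the capset payload, whose length comes from the reply header.
 * When the payload is larger than @capset_size the excess is read and
 * discarded; when smaller, the remainder of @capset is zeroed.
 */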
static bool
vtest_vcmd_get_capset(struct vtest *vtest,
                      enum virgl_renderer_capset id,
                      uint32_t version,
                      void *capset,
                      size_t capset_size)
{
   uint32_t vtest_hdr[VTEST_HDR_SIZE];
   uint32_t vcmd_get_capset[VCMD_GET_CAPSET_SIZE];
   vtest_hdr[VTEST_CMD_LEN] = VCMD_GET_CAPSET_SIZE;
   vtest_hdr[VTEST_CMD_ID] = VCMD_GET_CAPSET;
   vcmd_get_capset[VCMD_GET_CAPSET_ID] = id;
   vcmd_get_capset[VCMD_GET_CAPSET_VERSION] = version;

   vtest_write(vtest, vtest_hdr, sizeof(vtest_hdr));
   vtest_write(vtest, vcmd_get_capset, sizeof(vcmd_get_capset));

   vtest_read(vtest, vtest_hdr, sizeof(vtest_hdr));
   assert(vtest_hdr[VTEST_CMD_ID] == VCMD_GET_CAPSET);

   uint32_t valid;
   vtest_read(vtest, &valid, sizeof(valid));
   if (!valid)
      return false;

   size_t read_size = (vtest_hdr[VTEST_CMD_LEN] - 1) * 4;
   if (capset_size >= read_size) {
      vtest_read(vtest, capset, read_size);
      memset(capset + read_size, 0, capset_size - read_size);
   } else {
      vtest_read(vtest, capset, capset_size);

      char temp[256];
      read_size -= capset_size;
      while (read_size) {
         const size_t temp_size = MIN2(read_size, ARRAY_SIZE(temp));
         vtest_read(vtest, temp, temp_size);
         read_size -= temp_size;
      }
   }

   return true;
}

static void
vtest_vcmd_context_init(struct vtest *vtest,
                        enum virgl_renderer_capset capset_id)
{
   uint32_t vtest_hdr[VTEST_HDR_SIZE];
   uint32_t vcmd_context_init[VCMD_CONTEXT_INIT_SIZE];
   vtest_hdr[VTEST_CMD_LEN] = VCMD_CONTEXT_INIT_SIZE;
   vtest_hdr[VTEST_CMD_ID] = VCMD_CONTEXT_INIT;
   vcmd_context_init[VCMD_CONTEXT_INIT_CAPSET_ID] = capset_id;

   vtest_write(vtest, vtest_hdr, sizeof(vtest_hdr));
   vtest_write(vtest, vcmd_context_init, sizeof(vcmd_context_init));
}

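/* Create a blob resource and return its resource id.  The server also sends
 * back a file descriptor for the blob, received via SCM_RIGHTS and stored in
 * @res_fd.
 */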
static uint32_t
vtest_vcmd_resource_create_blob(struct vtest *vtest,
                                enum vcmd_blob_type type,
                                uint32_t flags,
                                VkDeviceSize size,
                                vn_object_id blob_id,
                                int *res_fd)
{
   uint32_t vtest_hdr[VTEST_HDR_SIZE];
   uint32_t vcmd_res_create_blob[VCMD_RES_CREATE_BLOB_SIZE];

   vtest_hdr[VTEST_CMD_LEN] = VCMD_RES_CREATE_BLOB_SIZE;
   vtest_hdr[VTEST_CMD_ID] = VCMD_RESOURCE_CREATE_BLOB;

   vcmd_res_create_blob[VCMD_RES_CREATE_BLOB_TYPE] = type;
   vcmd_res_create_blob[VCMD_RES_CREATE_BLOB_FLAGS] = flags;
   vcmd_res_create_blob[VCMD_RES_CREATE_BLOB_SIZE_LO] = (uint32_t)size;
   vcmd_res_create_blob[VCMD_RES_CREATE_BLOB_SIZE_HI] =
      (uint32_t)(size >> 32);
   vcmd_res_create_blob[VCMD_RES_CREATE_BLOB_ID_LO] = (uint32_t)blob_id;
   vcmd_res_create_blob[VCMD_RES_CREATE_BLOB_ID_HI] =
      (uint32_t)(blob_id >> 32);

   vtest_write(vtest, vtest_hdr, sizeof(vtest_hdr));
   vtest_write(vtest, vcmd_res_create_blob, sizeof(vcmd_res_create_blob));

   vtest_read(vtest, vtest_hdr, sizeof(vtest_hdr));
   assert(vtest_hdr[VTEST_CMD_LEN] == 1);
   assert(vtest_hdr[VTEST_CMD_ID] == VCMD_RESOURCE_CREATE_BLOB);

   uint32_t res_id;
   vtest_read(vtest, &res_id, sizeof(res_id));

   *res_fd = vtest_receive_fd(vtest);

   return res_id;
}

static void
vtest_vcmd_resource_unref(struct vtest *vtest, uint32_t res_id)
{
   uint32_t vtest_hdr[VTEST_HDR_SIZE];
   uint32_t vcmd_res_unref[VCMD_RES_UNREF_SIZE];

   vtest_hdr[VTEST_CMD_LEN] = VCMD_RES_UNREF_SIZE;
   vtest_hdr[VTEST_CMD_ID] = VCMD_RESOURCE_UNREF;
   vcmd_res_unref[VCMD_RES_UNREF_RES_HANDLE] = res_id;

   vtest_write(vtest, vtest_hdr, sizeof(vtest_hdr));
   vtest_write(vtest, vcmd_res_unref, sizeof(vcmd_res_unref));
}

static uint32_t
vtest_vcmd_sync_create(struct vtest *vtest, uint64_t initial_val)
{
   uint32_t vtest_hdr[VTEST_HDR_SIZE];
   uint32_t vcmd_sync_create[VCMD_SYNC_CREATE_SIZE];

   vtest_hdr[VTEST_CMD_LEN] = VCMD_SYNC_CREATE_SIZE;
   vtest_hdr[VTEST_CMD_ID] = VCMD_SYNC_CREATE;

   vcmd_sync_create[VCMD_SYNC_CREATE_VALUE_LO] = (uint32_t)initial_val;
   vcmd_sync_create[VCMD_SYNC_CREATE_VALUE_HI] =
      (uint32_t)(initial_val >> 32);

   vtest_write(vtest, vtest_hdr, sizeof(vtest_hdr));
   vtest_write(vtest, vcmd_sync_create, sizeof(vcmd_sync_create));

   vtest_read(vtest, vtest_hdr, sizeof(vtest_hdr));
   assert(vtest_hdr[VTEST_CMD_LEN] == 1);
   assert(vtest_hdr[VTEST_CMD_ID] == VCMD_SYNC_CREATE);

   uint32_t sync_id;
   vtest_read(vtest, &sync_id, sizeof(sync_id));

   return sync_id;
}

static void
vtest_vcmd_sync_unref(struct vtest *vtest, uint32_t sync_id)
{
   uint32_t vtest_hdr[VTEST_HDR_SIZE];
   uint32_t vcmd_sync_unref[VCMD_SYNC_UNREF_SIZE];

   vtest_hdr[VTEST_CMD_LEN] = VCMD_SYNC_UNREF_SIZE;
   vtest_hdr[VTEST_CMD_ID] = VCMD_SYNC_UNREF;
   vcmd_sync_unref[VCMD_SYNC_UNREF_ID] = sync_id;

   vtest_write(vtest, vtest_hdr, sizeof(vtest_hdr));
   vtest_write(vtest, vcmd_sync_unref, sizeof(vcmd_sync_unref));
}

static uint64_t
vtest_vcmd_sync_read(struct vtest *vtest, uint32_t sync_id)
{
   uint32_t vtest_hdr[VTEST_HDR_SIZE];
   uint32_t vcmd_sync_read[VCMD_SYNC_READ_SIZE];

   vtest_hdr[VTEST_CMD_LEN] = VCMD_SYNC_READ_SIZE;
   vtest_hdr[VTEST_CMD_ID] = VCMD_SYNC_READ;

   vcmd_sync_read[VCMD_SYNC_READ_ID] = sync_id;

   vtest_write(vtest, vtest_hdr, sizeof(vtest_hdr));
   vtest_write(vtest, vcmd_sync_read, sizeof(vcmd_sync_read));

   vtest_read(vtest, vtest_hdr, sizeof(vtest_hdr));
   assert(vtest_hdr[VTEST_CMD_LEN] == 2);
   assert(vtest_hdr[VTEST_CMD_ID] == VCMD_SYNC_READ);

   uint64_t val;
   vtest_read(vtest, &val, sizeof(val));

   return val;
}

static void
vtest_vcmd_sync_write(struct vtest *vtest, uint32_t sync_id, uint64_t val)
{
   uint32_t vtest_hdr[VTEST_HDR_SIZE];
   uint32_t vcmd_sync_write[VCMD_SYNC_WRITE_SIZE];

   vtest_hdr[VTEST_CMD_LEN] = VCMD_SYNC_WRITE_SIZE;
   vtest_hdr[VTEST_CMD_ID] = VCMD_SYNC_WRITE;

   vcmd_sync_write[VCMD_SYNC_WRITE_ID] = sync_id;
   vcmd_sync_write[VCMD_SYNC_WRITE_VALUE_LO] = (uint32_t)val;
   vcmd_sync_write[VCMD_SYNC_WRITE_VALUE_HI] = (uint32_t)(val >> 32);

   vtest_write(vtest, vtest_hdr, sizeof(vtest_hdr));
   vtest_write(vtest, vcmd_sync_write, sizeof(vcmd_sync_write));
}

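/* Wait on a set of (sync, value) pairs.  A negative @poll_timeout means wait
 * forever and is encoded as UINT32_MAX on the wire.  The server replies with
 * a file descriptor that the caller polls for readability to detect
 * completion (see sync_wait_poll below).
 */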
static int
vtest_vcmd_sync_wait(struct vtest *vtest,
                     uint32_t flags,
                     int poll_timeout,
                     struct vn_renderer_sync *const *syncs,
                     const uint64_t *vals,
                     uint32_t count)
{
   const uint32_t timeout = poll_timeout >= 0 && poll_timeout <= INT32_MAX
                               ? poll_timeout
                               : UINT32_MAX;

   uint32_t vtest_hdr[VTEST_HDR_SIZE];
   vtest_hdr[VTEST_CMD_LEN] = VCMD_SYNC_WAIT_SIZE(count);
   vtest_hdr[VTEST_CMD_ID] = VCMD_SYNC_WAIT;

   vtest_write(vtest, vtest_hdr, sizeof(vtest_hdr));
   vtest_write(vtest, &flags, sizeof(flags));
   vtest_write(vtest, &timeout, sizeof(timeout));
   for (uint32_t i = 0; i < count; i++) {
      const uint64_t val = vals[i];
      const uint32_t sync[3] = {
         syncs[i]->sync_id,
         (uint32_t)val,
         (uint32_t)(val >> 32),
      };
      vtest_write(vtest, sync, sizeof(sync));
   }

   vtest_read(vtest, vtest_hdr, sizeof(vtest_hdr));
   assert(vtest_hdr[VTEST_CMD_LEN] == 0);
   assert(vtest_hdr[VTEST_CMD_ID] == VCMD_SYNC_WAIT);

   return vtest_receive_fd(vtest);
}

static void
submit_cmd2_sizes(const struct vn_renderer_submit *submit,
                  size_t *header_size,
                  size_t *cs_size,
                  size_t *sync_size)
{
   if (!submit->batch_count) {
      *header_size = 0;
      *cs_size = 0;
      *sync_size = 0;
      return;
   }

   *header_size = sizeof(uint32_t) +
                  sizeof(struct vcmd_submit_cmd2_batch) * submit->batch_count;

   *cs_size = 0;
   *sync_size = 0;
   for (uint32_t i = 0; i < submit->batch_count; i++) {
      const struct vn_renderer_submit_batch *batch = &submit->batches[i];
      assert(batch->cs_size % sizeof(uint32_t) == 0);
      *cs_size += batch->cs_size;
      *sync_size += (sizeof(uint32_t) + sizeof(uint64_t)) * batch->sync_count;
   }

   assert(*header_size % sizeof(uint32_t) == 0);
   assert(*cs_size % sizeof(uint32_t) == 0);
   assert(*sync_size % sizeof(uint32_t) == 0);
}

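/* Encode a vn_renderer_submit as a single VCMD_SUBMIT_CMD2: the batch count
 * and per-batch headers first, then all command streams, then all
 * (sync id, value lo, value hi) triples, with offsets expressed in dwords.
 */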
static void
vtest_vcmd_submit_cmd2(struct vtest *vtest,
                       const struct vn_renderer_submit *submit)
{
   size_t header_size;
   size_t cs_size;
   size_t sync_size;
   submit_cmd2_sizes(submit, &header_size, &cs_size, &sync_size);
   const size_t total_size = header_size + cs_size + sync_size;
   if (!total_size)
      return;

   uint32_t vtest_hdr[VTEST_HDR_SIZE];
   vtest_hdr[VTEST_CMD_LEN] = total_size / sizeof(uint32_t);
   vtest_hdr[VTEST_CMD_ID] = VCMD_SUBMIT_CMD2;
   vtest_write(vtest, vtest_hdr, sizeof(vtest_hdr));

   /* write batch count and batch headers */
   const uint32_t batch_count = submit->batch_count;
   size_t cs_offset = header_size;
   size_t sync_offset = cs_offset + cs_size;
   vtest_write(vtest, &batch_count, sizeof(batch_count));
   for (uint32_t i = 0; i < submit->batch_count; i++) {
      const struct vn_renderer_submit_batch *batch = &submit->batches[i];
      struct vcmd_submit_cmd2_batch dst = {
         .flags = VCMD_SUBMIT_CMD2_FLAG_RING_IDX,
         .cmd_offset = cs_offset / sizeof(uint32_t),
         .cmd_size = batch->cs_size / sizeof(uint32_t),
         .sync_offset = sync_offset / sizeof(uint32_t),
         .sync_count = batch->sync_count,
         .ring_idx = batch->ring_idx,
      };
      vtest_write(vtest, &dst, sizeof(dst));

      cs_offset += batch->cs_size;
      sync_offset +=
         (sizeof(uint32_t) + sizeof(uint64_t)) * batch->sync_count;
   }

   /* write cs */
   if (cs_size) {
      for (uint32_t i = 0; i < submit->batch_count; i++) {
         const struct vn_renderer_submit_batch *batch = &submit->batches[i];
         if (batch->cs_size)
            vtest_write(vtest, batch->cs_data, batch->cs_size);
      }
   }

   /* write syncs */
   for (uint32_t i = 0; i < submit->batch_count; i++) {
      const struct vn_renderer_submit_batch *batch = &submit->batches[i];

      for (uint32_t j = 0; j < batch->sync_count; j++) {
         const uint64_t val = batch->sync_values[j];
         const uint32_t sync[3] = {
            batch->syncs[j]->sync_id,
            (uint32_t)val,
            (uint32_t)(val >> 32),
         };
         vtest_write(vtest, sync, sizeof(sync));
      }
   }
}

static VkResult
vtest_sync_write(struct vn_renderer *renderer,
                 struct vn_renderer_sync *_sync,
                 uint64_t val)
{
   struct vtest *vtest = (struct vtest *)renderer;
   struct vtest_sync *sync = (struct vtest_sync *)_sync;

   mtx_lock(&vtest->sock_mutex);
   vtest_vcmd_sync_write(vtest, sync->base.sync_id, val);
   mtx_unlock(&vtest->sock_mutex);

   return VK_SUCCESS;
}

static VkResult
vtest_sync_read(struct vn_renderer *renderer,
                struct vn_renderer_sync *_sync,
                uint64_t *val)
{
   struct vtest *vtest = (struct vtest *)renderer;
   struct vtest_sync *sync = (struct vtest_sync *)_sync;

   mtx_lock(&vtest->sock_mutex);
   *val = vtest_vcmd_sync_read(vtest, sync->base.sync_id);
   mtx_unlock(&vtest->sock_mutex);

   return VK_SUCCESS;
}

static VkResult
vtest_sync_reset(struct vn_renderer *renderer,
                 struct vn_renderer_sync *sync,
                 uint64_t initial_val)
{
   /* same as write */
   return vtest_sync_write(renderer, sync, initial_val);
}

static void
vtest_sync_destroy(struct vn_renderer *renderer,
                   struct vn_renderer_sync *_sync)
{
   struct vtest *vtest = (struct vtest *)renderer;
   struct vtest_sync *sync = (struct vtest_sync *)_sync;

   mtx_lock(&vtest->sock_mutex);
   vtest_vcmd_sync_unref(vtest, sync->base.sync_id);
   mtx_unlock(&vtest->sock_mutex);

   free(sync);
}

static VkResult
vtest_sync_create(struct vn_renderer *renderer,
                  uint64_t initial_val,
                  uint32_t flags,
                  struct vn_renderer_sync **out_sync)
{
   struct vtest *vtest = (struct vtest *)renderer;

   struct vtest_sync *sync = calloc(1, sizeof(*sync));
   if (!sync)
      return VK_ERROR_OUT_OF_HOST_MEMORY;

   mtx_lock(&vtest->sock_mutex);
   sync->base.sync_id = vtest_vcmd_sync_create(vtest, initial_val);
   mtx_unlock(&vtest->sock_mutex);

   *out_sync = &sync->base;
   return VK_SUCCESS;
}

static void
vtest_bo_invalidate(struct vn_renderer *renderer,
                    struct vn_renderer_bo *bo,
                    VkDeviceSize offset,
                    VkDeviceSize size)
{
   /* nop */
}

static void
vtest_bo_flush(struct vn_renderer *renderer,
               struct vn_renderer_bo *bo,
               VkDeviceSize offset,
               VkDeviceSize size)
{
   /* nop */
}

static void *
vtest_bo_map(struct vn_renderer *renderer, struct vn_renderer_bo *_bo)
{
   struct vtest *vtest = (struct vtest *)renderer;
   struct vtest_bo *bo = (struct vtest_bo *)_bo;
   const bool mappable = bo->blob_flags & VCMD_BLOB_FLAG_MAPPABLE;
   const bool shareable = bo->blob_flags & VCMD_BLOB_FLAG_SHAREABLE;

   /* not thread-safe but is fine */
   if (!bo->base.mmap_ptr && mappable) {
      /* We wrongly assume that mmap(dma_buf) and vkMapMemory(VkDeviceMemory)
       * are equivalent when the blob type is VCMD_BLOB_TYPE_HOST3D.  While we
       * check for VCMD_PARAM_HOST_COHERENT_DMABUF_BLOB, we know vtest can
       * lie.
       */
      void *ptr = mmap(NULL, bo->base.mmap_size, PROT_READ | PROT_WRITE,
                       MAP_SHARED, bo->res_fd, 0);
      if (ptr == MAP_FAILED) {
         vn_log(vtest->instance, "failed to mmap %d of size %zu rw: %s",
                bo->res_fd, bo->base.mmap_size, strerror(errno));
      } else {
         bo->base.mmap_ptr = ptr;
         /* we don't need the fd anymore */
         if (!shareable) {
            close(bo->res_fd);
            bo->res_fd = -1;
         }
      }
   }

   return bo->base.mmap_ptr;
}

static int
vtest_bo_export_dma_buf(struct vn_renderer *renderer,
                        struct vn_renderer_bo *_bo)
{
   const struct vtest_bo *bo = (struct vtest_bo *)_bo;
   const bool shareable = bo->blob_flags & VCMD_BLOB_FLAG_SHAREABLE;
   return shareable ? os_dupfd_cloexec(bo->res_fd) : -1;
}

static bool
vtest_bo_destroy(struct vn_renderer *renderer, struct vn_renderer_bo *_bo)
{
   struct vtest *vtest = (struct vtest *)renderer;
   struct vtest_bo *bo = (struct vtest_bo *)_bo;

   if (bo->base.mmap_ptr)
      munmap(bo->base.mmap_ptr, bo->base.mmap_size);
   if (bo->res_fd >= 0)
      close(bo->res_fd);

   mtx_lock(&vtest->sock_mutex);
   vtest_vcmd_resource_unref(vtest, bo->base.res_id);
   mtx_unlock(&vtest->sock_mutex);

   return true;
}

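/* Translate Vulkan memory properties and external handle types into vtest
 * blob flags: host-visible maps to MAPPABLE, any external handle type to
 * SHAREABLE, and dma-buf additionally to CROSS_DEVICE.
 */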
static uint32_t
vtest_bo_blob_flags(VkMemoryPropertyFlags flags,
                    VkExternalMemoryHandleTypeFlags external_handles)
{
   uint32_t blob_flags = 0;
   if (flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT)
      blob_flags |= VCMD_BLOB_FLAG_MAPPABLE;
   if (external_handles)
      blob_flags |= VCMD_BLOB_FLAG_SHAREABLE;
   if (external_handles & VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT)
      blob_flags |= VCMD_BLOB_FLAG_CROSS_DEVICE;

   return blob_flags;
}

static VkResult
vtest_bo_create_from_device_memory(
   struct vn_renderer *renderer,
   VkDeviceSize size,
   vn_object_id mem_id,
   VkMemoryPropertyFlags flags,
   VkExternalMemoryHandleTypeFlags external_handles,
   struct vn_renderer_bo **out_bo)
{
   struct vtest *vtest = (struct vtest *)renderer;
   const uint32_t blob_flags = vtest_bo_blob_flags(flags, external_handles);

   mtx_lock(&vtest->sock_mutex);
   int res_fd;
   uint32_t res_id = vtest_vcmd_resource_create_blob(
      vtest, VCMD_BLOB_TYPE_HOST3D, blob_flags, size, mem_id, &res_fd);
   assert(res_id > 0 && res_fd >= 0);
   mtx_unlock(&vtest->sock_mutex);

   struct vtest_bo *bo = util_sparse_array_get(&vtest->bo_array, res_id);
   *bo = (struct vtest_bo){
      .base = {
         .refcount = VN_REFCOUNT_INIT(1),
         .res_id = res_id,
         .mmap_size = size,
      },
      .res_fd = res_fd,
      .blob_flags = blob_flags,
   };

   *out_bo = &bo->base;

   return VK_SUCCESS;
}

static void
vtest_shmem_destroy_now(struct vn_renderer *renderer,
                        struct vn_renderer_shmem *_shmem)
{
   struct vtest *vtest = (struct vtest *)renderer;
   struct vtest_shmem *shmem = (struct vtest_shmem *)_shmem;

   munmap(shmem->base.mmap_ptr, shmem->base.mmap_size);

   mtx_lock(&vtest->sock_mutex);
   vtest_vcmd_resource_unref(vtest, shmem->base.res_id);
   mtx_unlock(&vtest->sock_mutex);
}

static void
vtest_shmem_destroy(struct vn_renderer *renderer,
                    struct vn_renderer_shmem *shmem)
{
   struct vtest *vtest = (struct vtest *)renderer;

   if (vn_renderer_shmem_cache_add(&vtest->shmem_cache, shmem))
      return;

   vtest_shmem_destroy_now(&vtest->base, shmem);
}

static struct vn_renderer_shmem *
vtest_shmem_create(struct vn_renderer *renderer, size_t size)
{
   struct vtest *vtest = (struct vtest *)renderer;

   struct vn_renderer_shmem *cached_shmem =
      vn_renderer_shmem_cache_get(&vtest->shmem_cache, size);
   if (cached_shmem) {
      cached_shmem->refcount = VN_REFCOUNT_INIT(1);
      return cached_shmem;
   }

   mtx_lock(&vtest->sock_mutex);
   int res_fd;
   uint32_t res_id = vtest_vcmd_resource_create_blob(
      vtest, vtest->shmem_blob_mem, VCMD_BLOB_FLAG_MAPPABLE, size, 0,
      &res_fd);
   assert(res_id > 0 && res_fd >= 0);
   mtx_unlock(&vtest->sock_mutex);

   void *ptr =
      mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, res_fd, 0);
   close(res_fd);
   if (ptr == MAP_FAILED) {
      mtx_lock(&vtest->sock_mutex);
      vtest_vcmd_resource_unref(vtest, res_id);
      mtx_unlock(&vtest->sock_mutex);
      return NULL;
   }

   struct vtest_shmem *shmem =
      util_sparse_array_get(&vtest->shmem_array, res_id);
   *shmem = (struct vtest_shmem){
      .base = {
         .refcount = VN_REFCOUNT_INIT(1),
         .res_id = res_id,
         .mmap_size = size,
         .mmap_ptr = ptr,
      },
   };

   return &shmem->base;
}

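/* Poll the fd returned by VCMD_SYNC_WAIT.  EINTR/EAGAIN are retried; a poll
 * failure or an fd error condition is reported as device loss (or host OOM
 * for ENOMEM), and a timeout maps to VK_TIMEOUT.
 */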
static VkResult
sync_wait_poll(int fd, int poll_timeout)
{
   struct pollfd pollfd = {
      .fd = fd,
      .events = POLLIN,
   };
   int ret;
   do {
      ret = poll(&pollfd, 1, poll_timeout);
   } while (ret == -1 && (errno == EINTR || errno == EAGAIN));

   if (ret < 0 || (ret > 0 && !(pollfd.revents & POLLIN))) {
      return (ret < 0 && errno == ENOMEM) ? VK_ERROR_OUT_OF_HOST_MEMORY
                                          : VK_ERROR_DEVICE_LOST;
   }

   return ret ? VK_SUCCESS : VK_TIMEOUT;
}

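/* Convert a Vulkan timeout in nanoseconds to a poll(2) timeout in
 * milliseconds, rounding up.  Timeouts too large to represent (including
 * overflow while rounding) fall back to -1, i.e. wait forever.
 */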
static int
timeout_to_poll_timeout(uint64_t timeout)
{
   const uint64_t ns_per_ms = 1000000;
   const uint64_t ms = (timeout + ns_per_ms - 1) / ns_per_ms;
   if (!ms && timeout)
      return -1;
   return ms <= INT_MAX ? ms : -1;
}

static VkResult
vtest_wait(struct vn_renderer *renderer, const struct vn_renderer_wait *wait)
{
   struct vtest *vtest = (struct vtest *)renderer;
   const uint32_t flags = wait->wait_any ? VCMD_SYNC_WAIT_FLAG_ANY : 0;
   const int poll_timeout = timeout_to_poll_timeout(wait->timeout);

   /*
    * vtest_vcmd_sync_wait (and some other sync commands) is executed only
    * after all prior commands have been dispatched.  That is far from ideal.
    *
    * In virtio-gpu, a drm_syncobj wait ioctl is executed immediately.  It
    * works because it uses virtio-gpu interrupts as a side channel.  vtest
    * needs a side channel to perform well.
    *
    * Whether on virtio-gpu or vtest, we should also set up a 1-byte coherent
    * memory that is set to non-zero by the GPU after the syncs signal.  That
    * would allow us to do a quick check (or spin a bit) before waiting.
    */
   mtx_lock(&vtest->sock_mutex);
   const int fd =
      vtest_vcmd_sync_wait(vtest, flags, poll_timeout, wait->syncs,
                           wait->sync_values, wait->sync_count);
   mtx_unlock(&vtest->sock_mutex);

   VkResult result = sync_wait_poll(fd, poll_timeout);
   close(fd);

   return result;
}

static VkResult
vtest_submit(struct vn_renderer *renderer,
             const struct vn_renderer_submit *submit)
{
   struct vtest *vtest = (struct vtest *)renderer;

   mtx_lock(&vtest->sock_mutex);
   vtest_vcmd_submit_cmd2(vtest, submit);
   mtx_unlock(&vtest->sock_mutex);

   return VK_SUCCESS;
}

static void
vtest_init_renderer_info(struct vtest *vtest)
{
   struct vn_renderer_info *info = &vtest->base.info;

   info->pci.vendor_id = VTEST_PCI_VENDOR_ID;
   info->pci.device_id = VTEST_PCI_DEVICE_ID;

   info->has_dma_buf_import = false;
   info->has_external_sync = false;
   info->has_implicit_fencing = false;

   const struct virgl_renderer_capset_venus *capset = &vtest->capset.data;
   info->wire_format_version = capset->wire_format_version;
   info->vk_xml_version = capset->vk_xml_version;
   info->vk_ext_command_serialization_spec_version =
      capset->vk_ext_command_serialization_spec_version;
   info->vk_mesa_venus_protocol_spec_version =
      capset->vk_mesa_venus_protocol_spec_version;
   assert(capset->supports_blob_id_0);

   /* ensure vk_extension_mask is large enough to hold all capset masks */
   STATIC_ASSERT(sizeof(info->vk_extension_mask) >=
                 sizeof(capset->vk_extension_mask1));
   memcpy(info->vk_extension_mask, capset->vk_extension_mask1,
          sizeof(capset->vk_extension_mask1));

   assert(capset->allow_vk_wait_syncs);

   assert(capset->supports_multiple_timelines);
   info->max_timeline_count = vtest->max_timeline_count;
}

static void
vtest_destroy(struct vn_renderer *renderer,
              const VkAllocationCallbacks *alloc)
{
   struct vtest *vtest = (struct vtest *)renderer;

   vn_renderer_shmem_cache_fini(&vtest->shmem_cache);

   if (vtest->sock_fd >= 0) {
      shutdown(vtest->sock_fd, SHUT_RDWR);
      close(vtest->sock_fd);
   }

   mtx_destroy(&vtest->sock_mutex);
   util_sparse_array_finish(&vtest->shmem_array);
   util_sparse_array_finish(&vtest->bo_array);

   vk_free(alloc, vtest);
}

static VkResult
vtest_init_capset(struct vtest *vtest)
{
   vtest->capset.id = VIRGL_RENDERER_CAPSET_VENUS;
   vtest->capset.version = 0;

   if (!vtest_vcmd_get_capset(vtest, vtest->capset.id, vtest->capset.version,
                              &vtest->capset.data,
                              sizeof(vtest->capset.data))) {
      vn_log(vtest->instance, "no venus capset");
      return VK_ERROR_INITIALIZATION_FAILED;
   }

   return VK_SUCCESS;
}

static VkResult
vtest_init_params(struct vtest *vtest)
{
   uint32_t val = vtest_vcmd_get_param(vtest, VCMD_PARAM_MAX_TIMELINE_COUNT);
   if (!val) {
      vn_log(vtest->instance, "no timeline support");
      return VK_ERROR_INITIALIZATION_FAILED;
   }
   vtest->max_timeline_count = val;

   return VK_SUCCESS;
}

static VkResult
vtest_init_protocol_version(struct vtest *vtest)
{
   const uint32_t min_protocol_version = 3;

   const uint32_t ver = vtest_vcmd_ping_protocol_version(vtest)
                           ? vtest_vcmd_protocol_version(vtest)
                           : 0;
   if (ver < min_protocol_version) {
      vn_log(vtest->instance, "vtest protocol version (%d) too old", ver);
      return VK_ERROR_INITIALIZATION_FAILED;
   }

   vtest->protocol_version = ver;

   return VK_SUCCESS;
}

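/* Connect to the vtest server and negotiate the minimum requirements for
 * venus: protocol version 3+, VCMD_PARAM_MAX_TIMELINE_COUNT, and the venus
 * capset.  The socket path comes from VTEST_SOCKET_NAME when set, otherwise
 * VTEST_DEFAULT_SOCKET_NAME.
 */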
static VkResult
vtest_init(struct vtest *vtest)
{
   const char *socket_name = os_get_option("VTEST_SOCKET_NAME");

   util_sparse_array_init(&vtest->shmem_array, sizeof(struct vtest_shmem),
                          1024);
   util_sparse_array_init(&vtest->bo_array, sizeof(struct vtest_bo), 1024);

   mtx_init(&vtest->sock_mutex, mtx_plain);
   vtest->sock_fd = vtest_connect_socket(
      vtest->instance, socket_name ? socket_name : VTEST_DEFAULT_SOCKET_NAME);
   if (vtest->sock_fd < 0)
      return VK_ERROR_INITIALIZATION_FAILED;

   const char *renderer_name = util_get_process_name();
   if (!renderer_name)
      renderer_name = "venus";
   vtest_vcmd_create_renderer(vtest, renderer_name);

   VkResult result = vtest_init_protocol_version(vtest);
   if (result == VK_SUCCESS)
      result = vtest_init_params(vtest);
   if (result == VK_SUCCESS)
      result = vtest_init_capset(vtest);
   if (result != VK_SUCCESS)
      return result;

   /* see virtgpu_init_shmem_blob_mem */
   assert(vtest->capset.data.supports_blob_id_0);
   vtest->shmem_blob_mem = VCMD_BLOB_TYPE_HOST3D;

   vn_renderer_shmem_cache_init(&vtest->shmem_cache, &vtest->base,
                                vtest_shmem_destroy_now);

   vtest_vcmd_context_init(vtest, vtest->capset.id);

   vtest_init_renderer_info(vtest);

   vtest->base.ops.destroy = vtest_destroy;
   vtest->base.ops.submit = vtest_submit;
   vtest->base.ops.wait = vtest_wait;

   vtest->base.shmem_ops.create = vtest_shmem_create;
   vtest->base.shmem_ops.destroy = vtest_shmem_destroy;

   vtest->base.bo_ops.create_from_device_memory =
      vtest_bo_create_from_device_memory;
   vtest->base.bo_ops.create_from_dma_buf = NULL;
   vtest->base.bo_ops.destroy = vtest_bo_destroy;
   vtest->base.bo_ops.export_dma_buf = vtest_bo_export_dma_buf;
   vtest->base.bo_ops.map = vtest_bo_map;
   vtest->base.bo_ops.flush = vtest_bo_flush;
   vtest->base.bo_ops.invalidate = vtest_bo_invalidate;

   vtest->base.sync_ops.create = vtest_sync_create;
   vtest->base.sync_ops.create_from_syncobj = NULL;
   vtest->base.sync_ops.destroy = vtest_sync_destroy;
   vtest->base.sync_ops.export_syncobj = NULL;
   vtest->base.sync_ops.reset = vtest_sync_reset;
   vtest->base.sync_ops.read = vtest_sync_read;
   vtest->base.sync_ops.write = vtest_sync_write;

   return VK_SUCCESS;
}

VkResult
vn_renderer_create_vtest(struct vn_instance *instance,
                         const VkAllocationCallbacks *alloc,
                         struct vn_renderer **renderer)
{
   struct vtest *vtest = vk_zalloc(alloc, sizeof(*vtest), VN_DEFAULT_ALIGN,
                                   VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
   if (!vtest)
      return VK_ERROR_OUT_OF_HOST_MEMORY;

   vtest->instance = instance;
   vtest->sock_fd = -1;

   VkResult result = vtest_init(vtest);
   if (result != VK_SUCCESS) {
      vtest_destroy(&vtest->base, alloc);
      return result;
   }

   *renderer = &vtest->base;

   return VK_SUCCESS;
}