1 /*
2 * Copyright © 2022 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Capture the hanging application with INTEL_DEBUG=capture-all
24 *
25 * Turn the error state into a replay file with :
26 * $ intel_error2hangdump error_state
27 *
28 * Replay with :
29 * $ intel_hang_replay -d error_state.dmp
30 */
31
32 #include <fcntl.h>
33 #include <getopt.h>
34 #include <inttypes.h>
35 #include <stdbool.h>
36 #include <stdio.h>
37 #include <stdlib.h>
38 #include <sys/mman.h>
39 #include <sys/stat.h>
40 #include <sys/types.h>
41 #include <unistd.h>
42
43 #include <xf86drm.h>
44
45 #include "common/intel_gem.h"
46 #include "common/i915/intel_gem.h"
47 #include "common/intel_hang_dump.h"
48 #include "compiler/elk/elk_disasm.h"
49 #include "compiler/elk/elk_isa_info.h"
50 #include "compiler/brw_disasm.h"
51 #include "compiler/brw_isa_info.h"
52 #include "dev/intel_device_info.h"
53
54 #include "drm-uapi/i915_drm.h"
55
56 #include "util/u_dynarray.h"
57 #include "util/u_math.h"
58
59 static uint32_t
gem_create(int drm_fd,uint64_t size)60 gem_create(int drm_fd, uint64_t size)
61 {
62 struct drm_i915_gem_create gem_create = {
63 .size = size,
64 };
65
66 int ret = intel_ioctl(drm_fd, DRM_IOCTL_I915_GEM_CREATE, &gem_create);
67 if (ret != 0) {
68 /* FIXME: What do we do if this fails? */
69 return 0;
70 }
71
72 return gem_create.handle;
73 }
74
75 static uint32_t
gem_context_create(int drm_fd)76 gem_context_create(int drm_fd)
77 {
78 /* TODO: add additional information in the intel_hang_dump_block_exec &
79 * intel_hang_dump_block_hw_image structures to specify the engine and use
80 * the correct engine here.
81 */
82 I915_DEFINE_CONTEXT_PARAM_ENGINES(engines_param, 1) = { };
83 struct drm_i915_gem_context_create_ext_setparam set_engines = {
84 .param = {
85 .param = I915_CONTEXT_PARAM_ENGINES,
86 .value = (uintptr_t)&engines_param,
87 .size = sizeof(engines_param),
88 }
89 };
90 struct drm_i915_gem_context_create_ext_setparam recoverable_param = {
91 .param = {
92 .param = I915_CONTEXT_PARAM_RECOVERABLE,
93 .value = 0,
94 },
95 };
96 struct drm_i915_gem_context_create_ext create = {
97 .flags = I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS,
98 };
99
100 intel_i915_gem_add_ext(&create.extensions,
101 I915_CONTEXT_CREATE_EXT_SETPARAM,
102 &set_engines.base);
103 intel_i915_gem_add_ext(&create.extensions,
104 I915_CONTEXT_CREATE_EXT_SETPARAM,
105 &recoverable_param.base);
106
107 if (intel_ioctl(drm_fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE_EXT, &create) == -1)
108 return false;
109
110 return create.ctx_id;
111 }
112
113 static bool
gem_context_set_hw_image(int drm_fd,uint32_t ctx_id,const void * hw_img_data,uint32_t img_size)114 gem_context_set_hw_image(int drm_fd, uint32_t ctx_id,
115 const void *hw_img_data, uint32_t img_size)
116 {
117 /* TODO: add additional information in the intel_hang_dump_block_exec &
118 * intel_hang_dump_block_hw_image structures to specify the engine and use
119 * the correct engine here.
120 */
121 struct i915_gem_context_param_context_image img_param = {
122 .engine = {
123 .engine_class = 0,
124 .engine_instance = 0,
125 },
126 .flags = I915_CONTEXT_IMAGE_FLAG_ENGINE_INDEX,
127 .size = img_size,
128 .image = (uint64_t)(uintptr_t)hw_img_data,
129 };
130 struct drm_i915_gem_context_param param = {
131 .ctx_id = ctx_id,
132 .param = I915_CONTEXT_PARAM_CONTEXT_IMAGE,
133 };
134 uint64_t val = 0;
135 int ret;
136
137 param.ctx_id = ctx_id;
138 param.param = I915_CONTEXT_PARAM_RECOVERABLE;
139 param.value = (uint64_t)(uintptr_t)&val;
140
141 ret = intel_ioctl(drm_fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, ¶m);
142 if (ret)
143 return false;
144
145 param.param = I915_CONTEXT_PARAM_CONTEXT_IMAGE;
146 param.size = sizeof(img_param);
147 param.value = (uint64_t)(uintptr_t)&img_param;
148
149 return intel_ioctl(drm_fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, ¶m) == 0;
150 }
151
152 static void*
gem_mmap_offset(int drm_fd,uint32_t gem_handle,uint64_t offset,uint64_t size,uint32_t flags)153 gem_mmap_offset(int drm_fd,
154 uint32_t gem_handle,
155 uint64_t offset,
156 uint64_t size,
157 uint32_t flags)
158 {
159 struct drm_i915_gem_mmap_offset gem_mmap = {
160 .handle = gem_handle,
161 .flags = I915_MMAP_OFFSET_WB,
162 };
163 assert(offset == 0);
164
165 /* Get the fake offset back */
166 int ret = intel_ioctl(drm_fd, DRM_IOCTL_I915_GEM_MMAP_OFFSET, &gem_mmap);
167 if (ret != 0 && gem_mmap.flags == I915_MMAP_OFFSET_FIXED) {
168 gem_mmap.flags =
169 (flags & I915_MMAP_WC) ? I915_MMAP_OFFSET_WC : I915_MMAP_OFFSET_WB,
170 ret = intel_ioctl(drm_fd, DRM_IOCTL_I915_GEM_MMAP_OFFSET, &gem_mmap);
171 }
172
173 if (ret != 0)
174 return MAP_FAILED;
175
176 /* And map it */
177 void *map = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
178 drm_fd, gem_mmap.offset);
179 return map;
180 }
181
/*
 * Read exactly `size` bytes from `file_fd` into the caller-provided
 * buffer `out_data`, looping over short reads. Asserts that the full
 * amount was read.
 */
static void
write_malloc_data(void *out_data,
                  int file_fd,
                  size_t size)
{
   char *dst = out_data;
   size_t done = 0;

   while (done < size) {
      ssize_t n = read(file_fd, dst + done, size - done);
      if (n <= 0)
         break;
      done += (size_t)n;
   }
   assert(done == size);
}
195
196 static void
write_gem_bo_data(int drm_fd,uint32_t gem_handle,int file_fd,size_t size)197 write_gem_bo_data(int drm_fd,
198 uint32_t gem_handle,
199 int file_fd,
200 size_t size)
201 {
202 void *map = gem_mmap_offset(drm_fd, gem_handle, 0, size, I915_MMAP_OFFSET_WB);
203 assert(map != MAP_FAILED);
204
205 size_t total_read_len = 0;
206 ssize_t read_len;
207 while (total_read_len < size &&
208 (read_len = read(file_fd, map + total_read_len, size - total_read_len)) > 0) {
209 total_read_len += read_len;
210 }
211 munmap(map, size);
212
213 assert(total_read_len == size);
214 }
215
216 static void
skip_data(int file_fd,size_t size)217 skip_data(int file_fd, size_t size)
218 {
219 lseek(file_fd, size, SEEK_CUR);
220 }
221
222 static int
get_drm_device(struct intel_device_info * devinfo)223 get_drm_device(struct intel_device_info *devinfo)
224 {
225 drmDevicePtr devices[8];
226 int max_devices = drmGetDevices2(0, devices, 8);
227
228 int i, fd = -1;
229 for (i = 0; i < max_devices; i++) {
230 if (devices[i]->available_nodes & 1 << DRM_NODE_RENDER &&
231 devices[i]->bustype == DRM_BUS_PCI &&
232 devices[i]->deviceinfo.pci->vendor_id == 0x8086) {
233 fd = open(devices[i]->nodes[DRM_NODE_RENDER], O_RDWR | O_CLOEXEC);
234 if (fd < 0)
235 continue;
236
237 if (!intel_get_device_info_from_fd(fd, devinfo, -1, -1) ||
238 devinfo->ver < 8) {
239 close(fd);
240 fd = -1;
241 continue;
242 }
243
244 /* Found a device! */
245 break;
246 }
247 }
248
249 return fd;
250 }
251
/* A buffer reconstructed from the hang dump. */
struct gem_bo {
   off_t file_offset;   /* Position of the data in the dump file (0 = no data). */
   uint32_t gem_handle; /* GEM handle once allocated on the replay device. */
   uint64_t offset;     /* GPU virtual address the BO must be pinned at. */
   uint64_t size;       /* Size in bytes. */
   bool hw_img;         /* True when this block is a HW context image. */
};

/*
 * qsort comparator ordering BOs by decreasing size.
 *
 * Fix: the original returned `gem_b2->size > gem_b1->size`, which only
 * yields 0 or 1 and can never report "less than". That is not a valid
 * qsort comparator (the ordering is inconsistent, which is undefined
 * behavior for qsort); return the full negative/zero/positive range.
 */
static int
compare_bos(const void *b1, const void *b2)
{
   const struct gem_bo *gem_b1 = b1, *gem_b2 = b2;

   if (gem_b1->size > gem_b2->size)
      return -1;
   if (gem_b1->size < gem_b2->size)
      return 1;
   return 0;
}
267
/*
 * Print the command-line usage of this tool to stream `f`.
 */
static void
print_help(const char *filename, FILE *f)
{
   static const char *const option_lines[] = {
      " -d, --dump FILE hang file to replay\n",
      " -l, --list list content of hang file (no replay)\n",
      " -s, --shader ADDR print shader at ADDR\n",
      " -h, --help print this screen\n",
      " -a, --address ADDR Find BO containing ADDR\n",
   };

   fprintf(f, "%s: %s [options]...\n", filename, filename);
   for (unsigned i = 0; i < sizeof(option_lines) / sizeof(option_lines[0]); i++)
      fputs(option_lines[i], f);
}
278
279 static int
execbuffer(int drm_fd,uint32_t context_id,struct util_dynarray * execbuffer_bos,struct gem_bo * exec_bo,uint64_t exec_offset)280 execbuffer(int drm_fd,
281 uint32_t context_id,
282 struct util_dynarray *execbuffer_bos,
283 struct gem_bo *exec_bo, uint64_t exec_offset)
284 {
285 struct drm_i915_gem_execbuffer2 execbuf = {
286 .buffers_ptr = (uintptr_t)(void *)util_dynarray_begin(execbuffer_bos),
287 .buffer_count = util_dynarray_num_elements(execbuffer_bos,
288 struct drm_i915_gem_exec_object2),
289 .batch_start_offset = exec_offset - exec_bo->offset,
290 .batch_len = exec_bo->size,
291 .flags = I915_EXEC_HANDLE_LUT,
292 .rsvd1 = context_id,
293 };
294
295 int ret = intel_ioctl(drm_fd, DRM_IOCTL_I915_GEM_EXECBUFFER2_WR, &execbuf);
296 if (ret == 0) {
297 struct drm_i915_gem_wait gem_wait = {
298 .bo_handle = exec_bo->gem_handle,
299 .timeout_ns = INT64_MAX,
300 };
301 ret = intel_ioctl(drm_fd, DRM_IOCTL_I915_GEM_WAIT, &gem_wait);
302 if (ret)
303 fprintf(stderr, "wait failed: %m\n");
304 } else {
305 fprintf(stderr, "execbuffer failed: %m\n");
306 }
307
308 return ret;
309 }
310
/*
 * Entry point: parse options, then either inspect a hang dump file
 * (--list / --shader / --address) or replay it on the first suitable
 * Intel GPU.
 */
int
main(int argc, char *argv[])
{
   bool help = false, list = false;
   const struct option aubinator_opts[] = {
      { "address", required_argument, NULL, 'a' },
      { "dump", required_argument, NULL, 'd' },
      { "shader", required_argument, NULL, 's' },
      { "list", no_argument, NULL, 'l' },
      { "help", no_argument, NULL, 'h' },
      { NULL, 0, NULL, 0 },
   };

   void *mem_ctx = ralloc_context(NULL);

   /* GPU addresses of shaders to disassemble (-s, repeatable). */
   struct util_dynarray shader_addresses;

   util_dynarray_init(&shader_addresses, mem_ctx);

   const char *file = NULL;
   uint64_t check_addr = -1;
   int c, i;
   while ((c = getopt_long(argc, argv, "a:d:hls:", aubinator_opts, &i)) != -1) {
      switch (c) {
      case 'a':
         check_addr = strtol(optarg, NULL, 0);
         break;
      case 'd':
         file = optarg;
         break;
      case 's': {
         uint64_t *addr = util_dynarray_grow(&shader_addresses, uint64_t, 1);
         *addr = strtol(optarg, NULL, 0);
         fprintf(stderr, "shader addr=0x%016"PRIx64"\n", *addr);
         break;
      }
      case 'h':
         help = true;
         break;
      case 'l':
         list = true;
         break;
      default:
         break;
      }
   }

   if (help) {
      print_help(argv[0], stderr);
      exit(EXIT_SUCCESS);
   }

   /* NOTE(review): `file` is NULL when -d was not given; open(NULL,...)
    * fails and we exit below, but no usage message is printed.
    */
   int file_fd = open(file, O_RDONLY);
   if (file_fd < 0)
      exit(EXIT_FAILURE);

   struct stat file_stats;
   if (fstat(file_fd, &file_stats) != 0)
      exit(EXIT_FAILURE);

   struct intel_device_info devinfo;
   int drm_fd = get_drm_device(&devinfo);
   if (drm_fd < 0)
      exit(EXIT_FAILURE);

   /* BO/map descriptors parsed out of the dump file. */
   struct util_dynarray buffers;
   uint64_t total_vma = 0;

   util_dynarray_init(&buffers, mem_ctx);

   /* `init` is the (optional) state-setup batch, `exec` the hanging
    * batch; offset == -1 serves as the "unset" sentinel for both.
    */
   union intel_hang_dump_block_all block_header;
   struct intel_hang_dump_block_exec init = {
      .offset = -1,
   }, exec = {
      .offset = -1,
   };

   /* First pass: walk the dump file block by block, recording BOs and
    * exec pointers; BO payloads are skipped (re-read during replay).
    */
   while (read(file_fd, &block_header.base, sizeof(block_header.base)) ==
          sizeof(block_header.base)) {

      static const size_t block_size[] = {
         [INTEL_HANG_DUMP_BLOCK_TYPE_HEADER] = sizeof(struct intel_hang_dump_block_header),
         [INTEL_HANG_DUMP_BLOCK_TYPE_BO] = sizeof(struct intel_hang_dump_block_bo),
         [INTEL_HANG_DUMP_BLOCK_TYPE_MAP] = sizeof(struct intel_hang_dump_block_map),
         [INTEL_HANG_DUMP_BLOCK_TYPE_EXEC] = sizeof(struct intel_hang_dump_block_exec),
         [INTEL_HANG_DUMP_BLOCK_TYPE_HW_IMAGE] = sizeof(struct intel_hang_dump_block_hw_image),
      };

      assert(block_header.base.type < ARRAY_SIZE(block_size));

      /* Read the remainder of the block past the common base header. */
      size_t remaining_size = block_size[block_header.base.type] - sizeof(block_header.base);
      ssize_t ret = read(file_fd, &block_header.base + 1, remaining_size);
      /* NOTE(review): declared inside the loop, so this resets to false
       * on every block — a HW_IMAGE seen in an earlier iteration is
       * forgotten by the time an EXEC block checks it below. It looks
       * like this should live outside the while loop; confirm against
       * upstream.
       */
      bool has_hw_image = false;
      assert(ret == remaining_size);

      switch (block_header.base.type) {
      case INTEL_HANG_DUMP_BLOCK_TYPE_HEADER:
         /* Validate the file really is a hang dump we understand. */
         assert(block_header.header.magic == INTEL_HANG_DUMP_MAGIC);
         assert(block_header.header.version == INTEL_HANG_DUMP_VERSION);
         break;

      case INTEL_HANG_DUMP_BLOCK_TYPE_BO: {
         /* BO with data: remember where its payload lives in the file. */
         struct gem_bo *bo = util_dynarray_grow(&buffers, struct gem_bo, 1);
         *bo = (struct gem_bo) {
            .file_offset = lseek(file_fd, 0, SEEK_CUR),
            .offset = block_header.bo.offset,
            .size = block_header.bo.size,
         };
         total_vma += bo->size;
         skip_data(file_fd, bo->size);
         if (list) {
            fprintf(stderr, "buffer: offset=0x%016"PRIx64" size=0x%016"PRIx64" name=%s\n",
                    bo->offset, bo->size, block_header.bo.name);
         }
         break;
      }

      case INTEL_HANG_DUMP_BLOCK_TYPE_HW_IMAGE: {
         /* HW context image: uploaded via SETPARAM, not pinned at a VMA. */
         struct gem_bo *bo = util_dynarray_grow(&buffers, struct gem_bo, 1);
         *bo = (struct gem_bo) {
            .file_offset = lseek(file_fd, 0, SEEK_CUR),
            .offset = 0,
            .size = block_header.hw_img.size,
            .hw_img = true,
         };
         total_vma += bo->size;
         skip_data(file_fd, bo->size);
         if (list) {
            fprintf(stderr, "buffer: offset=0x%016"PRIx64" size=0x%016"PRIx64" name=hw_img\n",
                    bo->offset, bo->size);
         }
         has_hw_image = true;
         break;
      }

      case INTEL_HANG_DUMP_BLOCK_TYPE_MAP: {
         /* VMA-only mapping: no payload in the file (file_offset = 0). */
         struct gem_bo *bo = util_dynarray_grow(&buffers, struct gem_bo, 1);
         *bo = (struct gem_bo) {
            .file_offset = 0,
            .offset = block_header.map.offset,
            .size = block_header.map.size,
         };
         total_vma += bo->size;
         if (list) {
            fprintf(stderr, "map : offset=0x%016"PRIx64" size=0x%016"PRIx64" name=%s\n",
                    bo->offset, bo->size, block_header.map.name);
         }
         break;
      }

      case INTEL_HANG_DUMP_BLOCK_TYPE_EXEC: {
         /* NOTE(review): `init.offset` is initialized to -1 as "unset",
          * so `== 0` can never match the sentinel and the init batch is
          * never captured here — presumably this was meant to be
          * `init.offset == -1`; confirm against upstream.
          */
         if (init.offset == 0 && !has_hw_image) {
            if (list)
               fprintf(stderr, "init : offset=0x%016"PRIx64"\n", block_header.exec.offset);
            init = block_header.exec;
         } else {
            if (list)
               fprintf(stderr, "exec : offset=0x%016"PRIx64"\n", block_header.exec.offset);
            exec = block_header.exec;
         }
         break;
      }

      default:
         unreachable("Invalid block type");
      }
   }

   fprintf(stderr, "total_vma: 0x%016"PRIx64"\n", total_vma);

   /* -a: report which recorded BO (if any) contains check_addr. */
   if (check_addr != -1) {
      struct gem_bo *check_bo = NULL;
      util_dynarray_foreach(&buffers, struct gem_bo, bo) {
         if (check_addr >= bo->offset && check_addr < (bo->offset + bo->size)) {
            check_bo = bo;
            break;
         }
      }

      if (check_bo) {
         fprintf(stderr, "address=0x%016"PRIx64" found in buffer 0x%016"PRIx64" size=0x%016"PRIx64"\n",
                 check_addr, check_bo->offset, check_bo->size);
      } else {
         fprintf(stderr, "address=0x%016"PRIx64" not found in buffer list\n", check_addr);
      }
   }

   /* -s: disassemble each requested shader address by mmap'ing the
    * containing BO's payload straight out of the dump file.
    */
   util_dynarray_foreach(&shader_addresses, uint64_t, addr) {
      bool found = false;
      util_dynarray_foreach(&buffers, struct gem_bo, bo) {
         if (*addr < bo->offset || *addr >= (bo->offset + bo->size))
            continue;
         /* Address falls in a MAP block — there is no data to show. */
         if (!bo->file_offset)
            break;

         /* mmap requires a page-aligned file offset; map from the
          * enclosing page boundary to the end of the file.
          */
         uint64_t aligned_offset = ROUND_DOWN_TO(bo->file_offset, 4096);
         uint64_t remaining_length = file_stats.st_size - aligned_offset;
         void *map = mmap(NULL, remaining_length, PROT_READ, MAP_PRIVATE,
                          file_fd, aligned_offset);
         if (map == MAP_FAILED)
            break;

         found = true;
         fprintf(stderr, "shader at 0x%016"PRIx64" file_offset=0%016"PRIx64" addr_offset=%016"PRIx64":\n", *addr,
                 (bo->file_offset - aligned_offset), (*addr - bo->offset));
         /* Gfx9+ uses the brw compiler backend, older parts use elk. */
         if (devinfo.ver >= 9) {
            struct brw_isa_info _isa, *isa = &_isa;
            brw_init_isa_info(isa, &devinfo);
            brw_disassemble_with_errors(isa,
                                        map + (bo->file_offset - aligned_offset) + (*addr - bo->offset),
                                        0, stderr);
         } else {
            struct elk_isa_info _isa, *isa = &_isa;
            elk_init_isa_info(isa, &devinfo);
            elk_disassemble_with_errors(isa,
                                        map + (bo->file_offset - aligned_offset) + (*addr - bo->offset),
                                        0, stderr);
         }

         munmap(map, remaining_length);
      }

      if (!found)
         fprintf(stderr, "shader at 0x%016"PRIx64" not found\n", *addr);
   }

   /* Replay mode: only when neither --list nor --shader was requested. */
   if (!list && util_dynarray_num_elements(&shader_addresses, uint64_t) == 0) {
      /* Sort buffers by size */
      qsort(util_dynarray_begin(&buffers),
            util_dynarray_num_elements(&buffers, struct gem_bo),
            sizeof(struct gem_bo),
            compare_bos);

      void *hw_img = NULL;
      uint32_t hw_img_size = 0;

      /* Allocate BOs populate them */
      uint64_t gem_allocated = 0;
      util_dynarray_foreach(&buffers, struct gem_bo, bo) {
         lseek(file_fd, bo->file_offset, SEEK_SET);
         if (bo->hw_img) {
            /* HW image data stays on the CPU side for SETPARAM below. */
            hw_img = malloc(bo->size);
            write_malloc_data(hw_img, file_fd, bo->size);
            hw_img_size = bo->size;
         } else {
            bo->gem_handle = gem_create(drm_fd, bo->size);
            write_gem_bo_data(drm_fd, bo->gem_handle, file_fd, bo->size);
         }

         gem_allocated += bo->size;
      }

      uint32_t ctx_id = gem_context_create(drm_fd);
      if (ctx_id == 0) {
         fprintf(stderr, "fail to create context: %s\n", strerror(errno));
         return EXIT_FAILURE;
      }

      if (hw_img != NULL) {
         if (!gem_context_set_hw_image(drm_fd, ctx_id, hw_img, hw_img_size)) {
            fprintf(stderr, "fail to set context hw img: %s\n", strerror(errno));
            return EXIT_FAILURE;
         }
      }

      /* Build the execbuf object list: every BO pinned at its recorded
       * GPU address, except the init/exec batches (added separately
       * with EXEC_OBJECT_WRITE so they can be waited on) and the HW
       * image (not a pinned BO).
       */
      struct util_dynarray execbuffer_bos;
      util_dynarray_init(&execbuffer_bos, mem_ctx);

      struct gem_bo *init_bo = NULL, *batch_bo = NULL;
      util_dynarray_foreach(&buffers, struct gem_bo, bo) {
         if (bo->offset <= init.offset &&
             (bo->offset + bo->size) > init.offset) {
            init_bo = bo;
            continue;
         }

         if (bo->offset <= exec.offset &&
             (bo->offset + bo->size) > exec.offset) {
            batch_bo = bo;
            continue;
         }

         if (bo->hw_img)
            continue;

         struct drm_i915_gem_exec_object2 *execbuf_bo =
            util_dynarray_grow(&execbuffer_bos, struct drm_i915_gem_exec_object2, 1);
         *execbuf_bo = (struct drm_i915_gem_exec_object2) {
            .handle = bo->gem_handle,
            .relocation_count = 0,
            .relocs_ptr = 0,
            .flags = EXEC_OBJECT_SUPPORTS_48B_ADDRESS |
                     EXEC_OBJECT_PINNED |
                     EXEC_OBJECT_CAPTURE,
            .offset = intel_canonical_address(bo->offset),
         };
      }

      assert(batch_bo != NULL);

      /* Reserved slot reused for the init batch first, then the exec
       * batch.
       */
      struct drm_i915_gem_exec_object2 *execbuf_bo =
         util_dynarray_grow(&execbuffer_bos, struct drm_i915_gem_exec_object2, 1);

      int ret;

      if (init_bo) {
         fprintf(stderr, "init: 0x%016"PRIx64"\n", init_bo->offset);
         *execbuf_bo = (struct drm_i915_gem_exec_object2) {
            .handle = init_bo->gem_handle,
            .relocation_count = 0,
            .relocs_ptr = 0,
            .flags = EXEC_OBJECT_SUPPORTS_48B_ADDRESS |
                     EXEC_OBJECT_PINNED |
                     EXEC_OBJECT_WRITE /* to be able to wait on the BO */ |
                     EXEC_OBJECT_CAPTURE,
            .offset = intel_canonical_address(init_bo->offset),
         };
         ret = execbuffer(drm_fd, ctx_id, &execbuffer_bos, init_bo, init.offset);
         if (ret != 0) {
            fprintf(stderr, "initialization buffer failed to execute errno=%i\n", errno);
            exit(-1);
         }
      } else {
         fprintf(stderr, "no init BO\n");
      }

      if (batch_bo) {
         fprintf(stderr, "exec: 0x%016"PRIx64" aperture=%.2fMb\n", batch_bo->offset,
                 gem_allocated / 1024.0 / 1024.0);
         *execbuf_bo = (struct drm_i915_gem_exec_object2) {
            .handle = batch_bo->gem_handle,
            .relocation_count = 0,
            .relocs_ptr = 0,
            .flags = EXEC_OBJECT_SUPPORTS_48B_ADDRESS |
                     EXEC_OBJECT_PINNED |
                     EXEC_OBJECT_WRITE /* to be able to wait on the BO */ |
                     EXEC_OBJECT_CAPTURE,
            .offset = intel_canonical_address(batch_bo->offset),
         };
         ret = execbuffer(drm_fd, ctx_id, &execbuffer_bos, batch_bo, exec.offset);
         if (ret != 0) {
            fprintf(stderr, "replayed buffer failed to execute errno=%i\n", errno);
            exit(-1);
         } else {
            fprintf(stderr, "exec completed successfully\n");
         }
      } else {
         fprintf(stderr, "no exec BO\n");
      }
   }

   close(drm_fd);
   close(file_fd);

   ralloc_free(mem_ctx);

   return EXIT_SUCCESS;
}
669