1 /*
2 * Copyright 2016 The Chromium OS Authors. All rights reserved.
3 * Use of this source code is governed by a BSD-style license that can be
4 * found in the LICENSE file.
5 */
6 #include <assert.h>
7 #include <errno.h>
8 #include <fcntl.h>
9 #include <inttypes.h>
10 #include <pthread.h>
11 #include <stdint.h>
12 #include <stdio.h>
13 #include <string.h>
14 #include <sys/mman.h>
15 #include <sys/types.h>
16 #include <unistd.h>
17 #include <xf86drm.h>
18
19 #ifdef __ANDROID__
20 #include <cutils/log.h>
21 #include <libgen.h>
22 #define MINIGBM_DEBUG "vendor.minigbm.debug"
23 #else
24 #define MINIGBM_DEBUG "MINIGBM_DEBUG"
25 #endif
26
27 #include "drv_helpers.h"
28 #include "drv_priv.h"
29 #include "util.h"
30
31 #ifdef DRV_AMDGPU
32 extern const struct backend backend_amdgpu;
33 #endif
34 #ifdef DRV_I915
35 extern const struct backend backend_i915;
36 #endif
37 #ifdef DRV_MSM
38 extern const struct backend backend_msm;
39 #endif
40 #ifdef DRV_VC4
41 extern const struct backend backend_vc4;
42 #endif
43
44 // Dumb / generic drivers
45 extern const struct backend backend_evdi;
46 extern const struct backend backend_marvell;
47 extern const struct backend backend_mediatek;
48 extern const struct backend backend_meson;
49 extern const struct backend backend_nouveau;
50 extern const struct backend backend_komeda;
51 extern const struct backend backend_radeon;
52 extern const struct backend backend_rockchip;
53 extern const struct backend backend_sun4i_drm;
54 extern const struct backend backend_synaptics;
55 extern const struct backend backend_virtgpu;
56 extern const struct backend backend_udl;
57 extern const struct backend backend_vkms;
58
59 extern const struct backend backend_mock;
60
/*
 * All compiled-in backends.  drv_get_backend() matches the kernel DRM driver
 * name against backend->name in this order, so hardware-specific backends
 * come before the dumb/generic ones.
 */
static const struct backend *drv_backend_list[] = {
#ifdef DRV_AMDGPU
	&backend_amdgpu,
#endif
#ifdef DRV_I915
	&backend_i915,
#endif
#ifdef DRV_MSM
	&backend_msm,
#endif
#ifdef DRV_VC4
	&backend_vc4,
#endif
	&backend_evdi,	    &backend_komeda,	&backend_marvell, &backend_mediatek,
	&backend_meson,	    &backend_nouveau,	&backend_radeon,  &backend_rockchip,
	&backend_sun4i_drm, &backend_synaptics, &backend_udl,	  &backend_virtgpu,
	&backend_vkms,	    &backend_mock
};
79
drv_preload(bool load)80 void drv_preload(bool load)
81 {
82 unsigned int i;
83 for (i = 0; i < ARRAY_SIZE(drv_backend_list); i++) {
84 const struct backend *b = drv_backend_list[i];
85 if (b->preload)
86 b->preload(load);
87 }
88 }
89
drv_get_backend(int fd)90 static const struct backend *drv_get_backend(int fd)
91 {
92 drmVersionPtr drm_version;
93 unsigned int i;
94
95 drm_version = drmGetVersion(fd);
96
97 if (!drm_version)
98 return NULL;
99
100 for (i = 0; i < ARRAY_SIZE(drv_backend_list); i++) {
101 const struct backend *b = drv_backend_list[i];
102 if (!strcmp(drm_version->name, b->name)) {
103 drmFreeVersion(drm_version);
104 return b;
105 }
106 }
107
108 drmFreeVersion(drm_version);
109 return NULL;
110 }
111
drv_create(int fd)112 struct driver *drv_create(int fd)
113 {
114 struct driver *drv;
115 int ret;
116
117 drv = (struct driver *)calloc(1, sizeof(*drv));
118
119 if (!drv)
120 return NULL;
121
122 const char *minigbm_debug;
123 minigbm_debug = drv_get_os_option(MINIGBM_DEBUG);
124 drv->compression =
125 (minigbm_debug == NULL) || (strstr(minigbm_debug, "nocompression") == NULL);
126 drv->log_bos = (minigbm_debug && strstr(minigbm_debug, "log_bos") != NULL);
127
128 drv->fd = fd;
129 drv->backend = drv_get_backend(fd);
130
131 if (!drv->backend)
132 goto free_driver;
133
134 if (pthread_mutex_init(&drv->buffer_table_lock, NULL))
135 goto free_driver;
136
137 drv->buffer_table = drmHashCreate();
138 if (!drv->buffer_table)
139 goto free_buffer_table_lock;
140
141 if (pthread_mutex_init(&drv->mappings_lock, NULL))
142 goto free_buffer_table;
143
144 drv->mappings = drv_array_init(sizeof(struct mapping));
145 if (!drv->mappings)
146 goto free_mappings_lock;
147
148 drv->combos = drv_array_init(sizeof(struct combination));
149 if (!drv->combos)
150 goto free_mappings;
151
152 if (drv->backend->init) {
153 ret = drv->backend->init(drv);
154 if (ret) {
155 drv_array_destroy(drv->combos);
156 goto free_mappings;
157 }
158 }
159
160 return drv;
161
162 free_mappings:
163 drv_array_destroy(drv->mappings);
164 free_mappings_lock:
165 pthread_mutex_destroy(&drv->mappings_lock);
166 free_buffer_table:
167 drmHashDestroy(drv->buffer_table);
168 free_buffer_table_lock:
169 pthread_mutex_destroy(&drv->buffer_table_lock);
170 free_driver:
171 free(drv);
172 return NULL;
173 }
174
drv_destroy(struct driver * drv)175 void drv_destroy(struct driver *drv)
176 {
177 if (drv->backend->close)
178 drv->backend->close(drv);
179
180 drv_array_destroy(drv->combos);
181
182 drv_array_destroy(drv->mappings);
183 pthread_mutex_destroy(&drv->mappings_lock);
184
185 drmHashDestroy(drv->buffer_table);
186 pthread_mutex_destroy(&drv->buffer_table_lock);
187
188 free(drv);
189 }
190
/* Return the DRM device fd this driver was created with. */
int drv_get_fd(struct driver *drv)
{
	return drv->fd;
}
195
/* Return the name of the backend driving this device. */
const char *drv_get_name(struct driver *drv)
{
	return drv->backend->name;
}
200
drv_get_combination(struct driver * drv,uint32_t format,uint64_t use_flags)201 struct combination *drv_get_combination(struct driver *drv, uint32_t format, uint64_t use_flags)
202 {
203 struct combination *curr, *best;
204
205 if (format == DRM_FORMAT_NONE || use_flags == BO_USE_NONE)
206 return 0;
207
208 best = NULL;
209 uint32_t i;
210 for (i = 0; i < drv_array_size(drv->combos); i++) {
211 curr = drv_array_at_idx(drv->combos, i);
212 if ((format == curr->format) && use_flags == (curr->use_flags & use_flags))
213 if (!best || best->metadata.priority < curr->metadata.priority)
214 best = curr;
215 }
216
217 return best;
218 }
219
drv_bo_new(struct driver * drv,uint32_t width,uint32_t height,uint32_t format,uint64_t use_flags,bool is_test_buffer)220 struct bo *drv_bo_new(struct driver *drv, uint32_t width, uint32_t height, uint32_t format,
221 uint64_t use_flags, bool is_test_buffer)
222 {
223
224 struct bo *bo;
225 bo = (struct bo *)calloc(1, sizeof(*bo));
226
227 if (!bo)
228 return NULL;
229
230 bo->drv = drv;
231 bo->meta.width = width;
232 bo->meta.height = height;
233 bo->meta.format = format;
234 bo->meta.use_flags = use_flags;
235 bo->meta.num_planes = drv_num_planes_from_format(format);
236 bo->is_test_buffer = is_test_buffer;
237
238 if (!bo->meta.num_planes) {
239 free(bo);
240 errno = EINVAL;
241 return NULL;
242 }
243
244 return bo;
245 }
246
/*
 * Drop every cached mapping that references this bo's buffer handle.
 * Unmaps each vma once its last reference is gone.
 */
static void drv_bo_mapping_destroy(struct bo *bo)
{
	struct driver *drv = bo->drv;
	uint32_t idx = 0;

	/*
	 * This function is called right before the buffer is destroyed. It will free any mappings
	 * associated with the buffer.
	 */
	pthread_mutex_lock(&drv->mappings_lock);
	for (size_t plane = 0; plane < bo->meta.num_planes; plane++) {
		while (idx < drv_array_size(drv->mappings)) {
			struct mapping *mapping =
			    (struct mapping *)drv_array_at_idx(drv->mappings, idx);
			if (mapping->vma->handle != bo->handle.u32) {
				idx++;
				continue;
			}

			if (!--mapping->vma->refcount) {
				int ret = drv->backend->bo_unmap(bo, mapping->vma);
				if (ret) {
					pthread_mutex_unlock(&drv->mappings_lock);
					/* NOTE(review): ret is nonzero here, so this
					 * assert never fires; presumably meant to trap
					 * the failure in debug builds — verify intent. */
					assert(ret);
					drv_loge("munmap failed\n");
					return;
				}

				free(mapping->vma);
			}

			/* This shrinks and shifts the array, so don't increment idx. */
			drv_array_remove(drv->mappings, idx);
		}
	}
	pthread_mutex_unlock(&drv->mappings_lock);
}
284
285 /*
286 * Acquire a reference on plane buffers of the bo.
287 */
drv_bo_acquire(struct bo * bo)288 static void drv_bo_acquire(struct bo *bo)
289 {
290 struct driver *drv = bo->drv;
291
292 pthread_mutex_lock(&drv->buffer_table_lock);
293 for (size_t plane = 0; plane < bo->meta.num_planes; plane++) {
294 uintptr_t num = 0;
295
296 if (!drmHashLookup(drv->buffer_table, bo->handle.u32, (void **)&num))
297 drmHashDelete(drv->buffer_table, bo->handle.u32);
298
299 drmHashInsert(drv->buffer_table, bo->handle.u32, (void *)(num + 1));
300 }
301 pthread_mutex_unlock(&drv->buffer_table_lock);
302 }
303
304 /*
305 * Release a reference on plane buffers of the bo. Return true when the bo has lost all its
306 * references. Otherwise, return false.
307 */
static bool drv_bo_release(struct bo *bo)
{
	struct driver *drv = bo->drv;
	uintptr_t num;

	/* Optional backend hook runs before refcount bookkeeping. */
	if (drv->backend->bo_release)
		drv->backend->bo_release(bo);

	pthread_mutex_lock(&drv->buffer_table_lock);
	/* Drop one reference per plane, mirroring drv_bo_acquire(). */
	for (size_t plane = 0; plane < bo->meta.num_planes; plane++) {
		if (!drmHashLookup(drv->buffer_table, bo->handle.u32, (void **)&num)) {
			drmHashDelete(drv->buffer_table, bo->handle.u32);

			/* Re-insert with the decremented count unless it hit zero. */
			if (num > 1) {
				drmHashInsert(drv->buffer_table, bo->handle.u32, (void *)(num - 1));
			}
		}
	}

	/* The same buffer can back multiple planes with different offsets. */
	for (size_t plane = 0; plane < bo->meta.num_planes; plane++) {
		if (!drmHashLookup(drv->buffer_table, bo->handle.u32, (void **)&num)) {
			/* num is positive if found in the hashmap. */
			pthread_mutex_unlock(&drv->buffer_table_lock);
			return false;
		}
	}
	pthread_mutex_unlock(&drv->buffer_table_lock);

	return true;
}
339
drv_bo_create(struct driver * drv,uint32_t width,uint32_t height,uint32_t format,uint64_t use_flags)340 struct bo *drv_bo_create(struct driver *drv, uint32_t width, uint32_t height, uint32_t format,
341 uint64_t use_flags)
342 {
343 int ret;
344 struct bo *bo;
345 bool is_test_alloc;
346
347 is_test_alloc = use_flags & BO_USE_TEST_ALLOC;
348 use_flags &= ~BO_USE_TEST_ALLOC;
349
350 bo = drv_bo_new(drv, width, height, format, use_flags, is_test_alloc);
351
352 if (!bo)
353 return NULL;
354
355 ret = -EINVAL;
356 if (drv->backend->bo_compute_metadata) {
357 ret = drv->backend->bo_compute_metadata(bo, width, height, format, use_flags, NULL,
358 0);
359 if (!is_test_alloc && ret == 0)
360 ret = drv->backend->bo_create_from_metadata(bo);
361 } else if (!is_test_alloc) {
362 ret = drv->backend->bo_create(bo, width, height, format, use_flags);
363 }
364
365 if (ret) {
366 errno = -ret;
367 free(bo);
368 return NULL;
369 }
370
371 drv_bo_acquire(bo);
372
373 if (drv->log_bos)
374 drv_bo_log_info(bo, "legacy created");
375
376 return bo;
377 }
378
drv_bo_create_with_modifiers(struct driver * drv,uint32_t width,uint32_t height,uint32_t format,const uint64_t * modifiers,uint32_t count)379 struct bo *drv_bo_create_with_modifiers(struct driver *drv, uint32_t width, uint32_t height,
380 uint32_t format, const uint64_t *modifiers, uint32_t count)
381 {
382 int ret;
383 struct bo *bo;
384
385 if (!drv->backend->bo_create_with_modifiers && !drv->backend->bo_compute_metadata) {
386 errno = ENOENT;
387 return NULL;
388 }
389
390 bo = drv_bo_new(drv, width, height, format, BO_USE_NONE, false);
391
392 if (!bo)
393 return NULL;
394
395 ret = -EINVAL;
396 if (drv->backend->bo_compute_metadata) {
397 ret = drv->backend->bo_compute_metadata(bo, width, height, format, BO_USE_NONE,
398 modifiers, count);
399 if (ret == 0)
400 ret = drv->backend->bo_create_from_metadata(bo);
401 } else {
402 ret = drv->backend->bo_create_with_modifiers(bo, width, height, format, modifiers,
403 count);
404 }
405
406 if (ret) {
407 free(bo);
408 return NULL;
409 }
410
411 drv_bo_acquire(bo);
412
413 if (drv->log_bos)
414 drv_bo_log_info(bo, "created");
415
416 return bo;
417 }
418
drv_bo_destroy(struct bo * bo)419 void drv_bo_destroy(struct bo *bo)
420 {
421 if (!bo->is_test_buffer && drv_bo_release(bo)) {
422 drv_bo_mapping_destroy(bo);
423 bo->drv->backend->bo_destroy(bo);
424 }
425
426 free(bo);
427 }
428
/*
 * Wrap externally provided dma-buf fds in a bo.  Per-plane sizes are derived
 * from the next plane's offset, or from the fd's length (via lseek) for the
 * last plane of a buffer.  Returns NULL on failure.
 */
struct bo *drv_bo_import(struct driver *drv, struct drv_import_fd_data *data)
{
	int ret;
	size_t plane;
	struct bo *bo;
	off_t seek_end;

	bo = drv_bo_new(drv, data->width, data->height, data->format, data->use_flags, false);

	if (!bo)
		return NULL;

	ret = drv->backend->bo_import(bo, data);
	if (ret) {
		free(bo);
		return NULL;
	}

	drv_bo_acquire(bo);

	bo->meta.format_modifier = data->format_modifier;
	for (plane = 0; plane < bo->meta.num_planes; plane++) {
		bo->meta.strides[plane] = data->strides[plane];
		bo->meta.offsets[plane] = data->offsets[plane];

		/* lseek(SEEK_END) yields the total length of the plane's fd. */
		seek_end = lseek(data->fds[plane], 0, SEEK_END);
		if (seek_end == (off_t)(-1)) {
			drv_loge("lseek() failed with %s\n", strerror(errno));
			goto destroy_bo;
		}

		lseek(data->fds[plane], 0, SEEK_SET);
		/* A zero offset in the next plane means it starts a new buffer,
		 * so this plane extends to the end of its own buffer. */
		if (plane == bo->meta.num_planes - 1 || data->offsets[plane + 1] == 0)
			bo->meta.sizes[plane] = seek_end - data->offsets[plane];
		else
			bo->meta.sizes[plane] = data->offsets[plane + 1] - data->offsets[plane];

		/* Reject layouts claiming more data than the fd holds. */
		if ((int64_t)bo->meta.offsets[plane] + bo->meta.sizes[plane] > seek_end) {
			drv_loge("buffer size is too large.\n");
			goto destroy_bo;
		}

		bo->meta.total_size += bo->meta.sizes[plane];
	}

	if (drv->log_bos)
		drv_bo_log_info(bo, "imported");

	return bo;

destroy_bo:
	drv_bo_destroy(bo);
	return NULL;
}
483
/*
 * Map the bo for CPU access and return the address of the requested plane.
 * Mappings are cached per (handle, map_flags, rect): an exact match bumps
 * the mapping refcount, a flags-only match shares the existing vma under a
 * new mapping entry, and otherwise the backend creates a fresh vma.
 * Returns MAP_FAILED on error or for test buffers; *map_data must later be
 * passed to drv_bo_unmap().
 */
void *drv_bo_map(struct bo *bo, const struct rectangle *rect, uint32_t map_flags,
		 struct mapping **map_data, size_t plane)
{
	struct driver *drv = bo->drv;
	uint32_t i;
	uint8_t *addr;
	struct mapping mapping = { 0 };

	assert(rect->width >= 0);
	assert(rect->height >= 0);
	assert(rect->x + rect->width <= drv_bo_get_width(bo));
	assert(rect->y + rect->height <= drv_bo_get_height(bo));
	assert(BO_MAP_READ_WRITE & map_flags);
	/* No CPU access for protected buffers. */
	assert(!(bo->meta.use_flags & BO_USE_PROTECTED));

	if (bo->is_test_buffer)
		return MAP_FAILED;

	mapping.rect = *rect;
	mapping.refcount = 1;

	pthread_mutex_lock(&drv->mappings_lock);

	/* First pass: reuse an existing mapping with identical flags and rect. */
	for (i = 0; i < drv_array_size(drv->mappings); i++) {
		struct mapping *prior = (struct mapping *)drv_array_at_idx(drv->mappings, i);
		if (prior->vma->handle != bo->handle.u32 || prior->vma->map_flags != map_flags)
			continue;

		if (rect->x != prior->rect.x || rect->y != prior->rect.y ||
		    rect->width != prior->rect.width || rect->height != prior->rect.height)
			continue;

		prior->refcount++;
		*map_data = prior;
		goto exact_match;
	}

	/* Second pass: share the vma of any mapping with matching flags. */
	for (i = 0; i < drv_array_size(drv->mappings); i++) {
		struct mapping *prior = (struct mapping *)drv_array_at_idx(drv->mappings, i);
		if (prior->vma->handle != bo->handle.u32 || prior->vma->map_flags != map_flags)
			continue;

		prior->vma->refcount++;
		mapping.vma = prior->vma;
		goto success;
	}

	/* No reusable vma: ask the backend for a fresh CPU mapping. */
	mapping.vma = calloc(1, sizeof(*mapping.vma));
	if (!mapping.vma) {
		*map_data = NULL;
		pthread_mutex_unlock(&drv->mappings_lock);
		return MAP_FAILED;
	}

	memcpy(mapping.vma->map_strides, bo->meta.strides, sizeof(mapping.vma->map_strides));
	addr = drv->backend->bo_map(bo, mapping.vma, map_flags);
	if (addr == MAP_FAILED) {
		*map_data = NULL;
		free(mapping.vma);
		pthread_mutex_unlock(&drv->mappings_lock);
		return MAP_FAILED;
	}

	mapping.vma->refcount = 1;
	mapping.vma->addr = addr;
	mapping.vma->handle = bo->handle.u32;
	mapping.vma->map_flags = map_flags;

success:
	*map_data = drv_array_append(drv->mappings, &mapping);
exact_match:
	drv_bo_invalidate(bo, *map_data);
	addr = (uint8_t *)((*map_data)->vma->addr);
	addr += drv_bo_get_plane_offset(bo, plane);
	pthread_mutex_unlock(&drv->mappings_lock);
	return (void *)addr;
}
562
drv_bo_unmap(struct bo * bo,struct mapping * mapping)563 int drv_bo_unmap(struct bo *bo, struct mapping *mapping)
564 {
565 struct driver *drv = bo->drv;
566 uint32_t i;
567 int ret = 0;
568
569 pthread_mutex_lock(&drv->mappings_lock);
570
571 if (--mapping->refcount)
572 goto out;
573
574 if (!--mapping->vma->refcount) {
575 ret = drv->backend->bo_unmap(bo, mapping->vma);
576 free(mapping->vma);
577 }
578
579 for (i = 0; i < drv_array_size(drv->mappings); i++) {
580 if (mapping == (struct mapping *)drv_array_at_idx(drv->mappings, i)) {
581 drv_array_remove(drv->mappings, i);
582 break;
583 }
584 }
585
586 out:
587 pthread_mutex_unlock(&drv->mappings_lock);
588 return ret;
589 }
590
/* Return whether the bo's metadata marks it as CPU-cached. */
bool drv_bo_cached(struct bo *bo)
{
	return bo->meta.cached;
}
595
drv_bo_invalidate(struct bo * bo,struct mapping * mapping)596 int drv_bo_invalidate(struct bo *bo, struct mapping *mapping)
597 {
598 int ret = 0;
599
600 assert(mapping);
601 assert(mapping->vma);
602 assert(mapping->refcount > 0);
603 assert(mapping->vma->refcount > 0);
604
605 if (bo->drv->backend->bo_invalidate)
606 ret = bo->drv->backend->bo_invalidate(bo, mapping);
607
608 return ret;
609 }
610
drv_bo_flush(struct bo * bo,struct mapping * mapping)611 int drv_bo_flush(struct bo *bo, struct mapping *mapping)
612 {
613 int ret = 0;
614
615 assert(mapping);
616 assert(mapping->vma);
617 assert(mapping->refcount > 0);
618 assert(mapping->vma->refcount > 0);
619
620 if (bo->drv->backend->bo_flush)
621 ret = bo->drv->backend->bo_flush(bo, mapping);
622
623 return ret;
624 }
625
drv_bo_flush_or_unmap(struct bo * bo,struct mapping * mapping)626 int drv_bo_flush_or_unmap(struct bo *bo, struct mapping *mapping)
627 {
628 int ret = 0;
629
630 assert(mapping);
631 assert(mapping->vma);
632 assert(mapping->refcount > 0);
633 assert(mapping->vma->refcount > 0);
634 assert(!(bo->meta.use_flags & BO_USE_PROTECTED));
635
636 if (bo->drv->backend->bo_flush)
637 ret = bo->drv->backend->bo_flush(bo, mapping);
638 else
639 ret = drv_bo_unmap(bo, mapping);
640
641 return ret;
642 }
643
/* Width in pixels recorded at allocation/import time. */
uint32_t drv_bo_get_width(struct bo *bo)
{
	return bo->meta.width;
}
648
/* Height in pixels recorded at allocation/import time. */
uint32_t drv_bo_get_height(struct bo *bo)
{
	return bo->meta.height;
}
653
/* Number of planes derived from the bo's format. */
size_t drv_bo_get_num_planes(struct bo *bo)
{
	return bo->meta.num_planes;
}
658
/* Return the bo's buffer handle.  A single handle backs all planes, so the
 * plane argument is currently unused. */
union bo_handle drv_bo_get_plane_handle(struct bo *bo, size_t plane)
{
	return bo->handle;
}
663
664 #ifndef DRM_RDWR
665 #define DRM_RDWR O_RDWR
666 #endif
667
drv_bo_get_plane_fd(struct bo * bo,size_t plane)668 int drv_bo_get_plane_fd(struct bo *bo, size_t plane)
669 {
670
671 int ret, fd;
672 assert(plane < bo->meta.num_planes);
673
674 if (bo->is_test_buffer)
675 return -EINVAL;
676
677 ret = drmPrimeHandleToFD(bo->drv->fd, bo->handle.u32, DRM_CLOEXEC | DRM_RDWR, &fd);
678
679 // Older DRM implementations blocked DRM_RDWR, but gave a read/write mapping anyways
680 if (ret)
681 ret = drmPrimeHandleToFD(bo->drv->fd, bo->handle.u32, DRM_CLOEXEC, &fd);
682
683 if (ret)
684 drv_loge("Failed to get plane fd: %s\n", strerror(errno));
685
686 return (ret) ? ret : fd;
687 }
688
/* Byte offset of the given plane within the buffer. */
uint32_t drv_bo_get_plane_offset(struct bo *bo, size_t plane)
{
	assert(plane < bo->meta.num_planes);
	return bo->meta.offsets[plane];
}
694
/* Size in bytes of the given plane. */
uint32_t drv_bo_get_plane_size(struct bo *bo, size_t plane)
{
	assert(plane < bo->meta.num_planes);
	return bo->meta.sizes[plane];
}
700
/* Row stride in bytes of the given plane. */
uint32_t drv_bo_get_plane_stride(struct bo *bo, size_t plane)
{
	assert(plane < bo->meta.num_planes);
	return bo->meta.strides[plane];
}
706
/* DRM format modifier recorded for this bo. */
uint64_t drv_bo_get_format_modifier(struct bo *bo)
{
	return bo->meta.format_modifier;
}
711
/* Fourcc format the bo was created/imported with. */
uint32_t drv_bo_get_format(struct bo *bo)
{
	return bo->meta.format;
}
716
/* Backend-specific tiling value stored in the bo metadata. */
uint32_t drv_bo_get_tiling(struct bo *bo)
{
	return bo->meta.tiling;
}
721
/* BO_USE_* flags the bo was created/imported with. */
uint64_t drv_bo_get_use_flags(struct bo *bo)
{
	return bo->meta.use_flags;
}
726
/* Total size in bytes across all planes. */
size_t drv_bo_get_total_size(struct bo *bo)
{
	return bo->meta.total_size;
}
731
drv_bo_log_info(const struct bo * bo,const char * prefix)732 void drv_bo_log_info(const struct bo *bo, const char *prefix)
733 {
734 const struct bo_metadata *meta = &bo->meta;
735
736 drv_logd("%s %s bo %p: %dx%d '%c%c%c%c' tiling %d plane %zu mod 0x%" PRIx64
737 " use 0x%" PRIx64 " size %zu\n",
738 prefix, bo->drv->backend->name, bo, meta->width, meta->height, meta->format & 0xff,
739 (meta->format >> 8) & 0xff, (meta->format >> 16) & 0xff,
740 (meta->format >> 24) & 0xff, meta->tiling, meta->num_planes, meta->format_modifier,
741 meta->use_flags, meta->total_size);
742 for (uint32_t i = 0; i < meta->num_planes; i++) {
743 drv_logd(" bo %p plane %d: offset %d size %d stride %d\n", bo, i, meta->offsets[i],
744 meta->sizes[i], meta->strides[i]);
745 }
746 }
747
748 /*
749 * Map internal fourcc codes back to standard fourcc codes.
750 */
drv_get_standard_fourcc(uint32_t fourcc_internal)751 uint32_t drv_get_standard_fourcc(uint32_t fourcc_internal)
752 {
753 return (fourcc_internal == DRM_FORMAT_YVU420_ANDROID) ? DRM_FORMAT_YVU420 : fourcc_internal;
754 }
755
drv_resolve_format_and_use_flags(struct driver * drv,uint32_t format,uint64_t use_flags,uint32_t * out_format,uint64_t * out_use_flags)756 void drv_resolve_format_and_use_flags(struct driver *drv, uint32_t format, uint64_t use_flags,
757 uint32_t *out_format, uint64_t *out_use_flags)
758 {
759 assert(drv->backend->resolve_format_and_use_flags);
760
761 drv->backend->resolve_format_and_use_flags(drv, format, use_flags, out_format,
762 out_use_flags);
763 }
764
/*
 * Common log sink: prefixes the message with "[prefix:func(line)]" and
 * forwards it to the Android log (at a priority mapped from level) or, on
 * other platforms, to stderr.
 */
void drv_log_prefix(enum drv_log_level level, const char *prefix, const char *func, int line,
		    const char *format, ...)
{
	char buf[50];
	snprintf(buf, sizeof(buf), "[%s:%s(%d)]", prefix, func, line);

	va_list args;
	va_start(args, format);
#ifdef __ANDROID__
	int prio = ANDROID_LOG_ERROR;
	switch (level) {
	case DRV_LOGV:
		prio = ANDROID_LOG_VERBOSE;
		break;
	case DRV_LOGD:
		prio = ANDROID_LOG_DEBUG;
		break;
	case DRV_LOGI:
		prio = ANDROID_LOG_INFO;
		break;
	case DRV_LOGE:
	default:
		break;
	} /* stray ';' after this block removed — it was an empty statement */
	__android_log_vprint(prio, buf, format, args);
#else
	fprintf(stderr, "%s ", buf);
	vfprintf(stderr, format, args);
#endif
	va_end(args);
}
796
drv_resource_info(struct bo * bo,uint32_t strides[DRV_MAX_PLANES],uint32_t offsets[DRV_MAX_PLANES],uint64_t * format_modifier)797 int drv_resource_info(struct bo *bo, uint32_t strides[DRV_MAX_PLANES],
798 uint32_t offsets[DRV_MAX_PLANES], uint64_t *format_modifier)
799 {
800 for (uint32_t plane = 0; plane < bo->meta.num_planes; plane++) {
801 strides[plane] = bo->meta.strides[plane];
802 offsets[plane] = bo->meta.offsets[plane];
803 }
804 *format_modifier = bo->meta.format_modifier;
805
806 if (bo->drv->backend->resource_info)
807 return bo->drv->backend->resource_info(bo, strides, offsets, format_modifier);
808
809 return 0;
810 }
811
drv_get_max_texture_2d_size(struct driver * drv)812 uint32_t drv_get_max_texture_2d_size(struct driver *drv)
813 {
814 if (drv->backend->get_max_texture_2d_size)
815 return drv->backend->get_max_texture_2d_size(drv);
816
817 return UINT32_MAX;
818 }
819