/*
 * Copyright (C) 1999-2007 Brian Paul All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <dlfcn.h>
#include "drm-uapi/drm_fourcc.h"
#include "util/u_memory.h"
#include "pipe/p_screen.h"
#include "state_tracker/st_texture.h"
#include "state_tracker/st_context.h"
#include "main/texobj.h"
#include "util/libsync.h"

#include "dri_helpers.h"
#include "loader_dri_helper.h"

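/* Check whether all four OpenCL interop entry points have been resolved.
 * The caller must hold screen->opencl_func_mutex.
 */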
static bool
dri2_is_opencl_interop_loaded_locked(struct dri_screen *screen)
{
   return screen->opencl_dri_event_add_ref &&
          screen->opencl_dri_event_release &&
          screen->opencl_dri_event_wait &&
          screen->opencl_dri_event_get_fence;
}

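/* Lazily resolve the OpenCL interop entry points from the process's global
 * symbol table; they are only present when an OpenCL implementation that
 * exports them is loaded into the same process. Returns true only if all
 * four symbols were found.
 */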
static bool
dri2_load_opencl_interop(struct dri_screen *screen)
{
#if defined(RTLD_DEFAULT)
   bool success;

   mtx_lock(&screen->opencl_func_mutex);

   if (dri2_is_opencl_interop_loaded_locked(screen)) {
      mtx_unlock(&screen->opencl_func_mutex);
      return true;
   }

   screen->opencl_dri_event_add_ref =
      dlsym(RTLD_DEFAULT, "opencl_dri_event_add_ref");
   screen->opencl_dri_event_release =
      dlsym(RTLD_DEFAULT, "opencl_dri_event_release");
   screen->opencl_dri_event_wait =
      dlsym(RTLD_DEFAULT, "opencl_dri_event_wait");
   screen->opencl_dri_event_get_fence =
      dlsym(RTLD_DEFAULT, "opencl_dri_event_get_fence");

   success = dri2_is_opencl_interop_loaded_locked(screen);
   mtx_unlock(&screen->opencl_func_mutex);
   return success;
#else
   return false;
#endif
}

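/* A DRI2 fence wraps either a gallium fence handle or an OpenCL event
 * (for fences created from cl_events); exactly one of the two is set.
 */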
struct dri2_fence {
   struct dri_screen *driscreen;
   struct pipe_fence_handle *pipe_fence;
   void *cl_event;
};

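/* Report the fence capabilities for __DRI2fenceExtension::get_capabilities;
 * native fd fences are advertised when the driver supports them.
 */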
unsigned
dri_fence_get_caps(__DRIscreen *_screen)
{
   struct dri_screen *driscreen = dri_screen(_screen);
   struct pipe_screen *screen = driscreen->base.screen;
   unsigned caps = 0;

   if (screen->get_param(screen, PIPE_CAP_NATIVE_FENCE_FD))
      caps |= __DRI_FENCE_CAP_NATIVE_FD;

   return caps;
}

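/* Create a fence by flushing the context. Returns NULL if the driver did
 * not produce a fence handle.
 */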
void *
dri_create_fence(__DRIcontext *_ctx)
{
   struct dri_context *ctx = dri_context(_ctx);
   struct st_context *st = ctx->st;
   struct dri2_fence *fence = CALLOC_STRUCT(dri2_fence);

   if (!fence)
      return NULL;

   /* Wait for glthread to finish because we can't use pipe_context from
    * multiple threads.
    */
   _mesa_glthread_finish(st->ctx);

   st_context_flush(st, 0, &fence->pipe_fence, NULL, NULL);

   if (!fence->pipe_fence) {
      FREE(fence);
      return NULL;
   }

   fence->driscreen = ctx->screen;
   return fence;
}

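/* Create a fence backed by a native sync fd: fd == -1 flushes and exports
 * a new driver fence, any other fd imports a foreign fence.
 */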
void *
dri_create_fence_fd(__DRIcontext *_ctx, int fd)
{
   struct dri_context *dri_ctx = dri_context(_ctx);
   struct st_context *st = dri_ctx->st;
   struct pipe_context *ctx = st->pipe;
   struct dri2_fence *fence = CALLOC_STRUCT(dri2_fence);

   if (!fence)
      return NULL;

   /* Wait for glthread to finish because we can't use pipe_context from
    * multiple threads.
    */
   _mesa_glthread_finish(st->ctx);

   if (fd == -1) {
      /* exporting a driver-created fence, so flush: */
      st_context_flush(st, ST_FLUSH_FENCE_FD, &fence->pipe_fence, NULL, NULL);
   } else {
      /* importing a foreign fence fd: */
      ctx->create_fence_fd(ctx, &fence->pipe_fence, fd, PIPE_FD_TYPE_NATIVE_SYNC);
   }
   if (!fence->pipe_fence) {
      FREE(fence);
      return NULL;
   }

   fence->driscreen = dri_ctx->screen;
   return fence;
}

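/* Export a native sync fd from a fence. */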
int
dri_get_fence_fd(__DRIscreen *_screen, void *_fence)
{
   struct dri_screen *driscreen = dri_screen(_screen);
   struct pipe_screen *screen = driscreen->base.screen;
   struct dri2_fence *fence = (struct dri2_fence*)_fence;

   return screen->fence_get_fd(screen, fence->pipe_fence);
}

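/* Wrap an OpenCL event as a fence, taking a reference on the event.
 * Requires the OpenCL interop entry points to be resolvable.
 */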
void *
dri_get_fence_from_cl_event(__DRIscreen *_screen, intptr_t cl_event)
{
   struct dri_screen *driscreen = dri_screen(_screen);
   struct dri2_fence *fence;

   if (!dri2_load_opencl_interop(driscreen))
      return NULL;

   fence = CALLOC_STRUCT(dri2_fence);
   if (!fence)
      return NULL;

   fence->cl_event = (void*)cl_event;

   if (!driscreen->opencl_dri_event_add_ref(fence->cl_event)) {
      FREE(fence);
      return NULL;
   }

   fence->driscreen = driscreen;
   return fence;
}

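/* Destroy a fence, dropping whichever reference (pipe fence or CL event)
 * it holds.
 */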
void
dri_destroy_fence(__DRIscreen *_screen, void *_fence)
{
   struct dri_screen *driscreen = dri_screen(_screen);
   struct pipe_screen *screen = driscreen->base.screen;
   struct dri2_fence *fence = (struct dri2_fence*)_fence;

   if (fence->pipe_fence)
      screen->fence_reference(screen, &fence->pipe_fence, NULL);
   else if (fence->cl_event)
      driscreen->opencl_dri_event_release(fence->cl_event);
   else
      assert(0);

   FREE(fence);
}

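/* Block the caller until the fence signals or the timeout expires. For
 * CL-event fences, wait on the underlying pipe fence if one is already
 * available, otherwise defer to the OpenCL runtime.
 */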
GLboolean
dri_client_wait_sync(__DRIcontext *_ctx, void *_fence, unsigned flags,
                     uint64_t timeout)
{
   struct dri2_fence *fence = (struct dri2_fence*)_fence;
   struct dri_screen *driscreen = fence->driscreen;
   struct pipe_screen *screen = driscreen->base.screen;

   /* No need to flush. The context was flushed when the fence was created. */

   if (fence->pipe_fence) {
      return screen->fence_finish(screen, NULL, fence->pipe_fence, timeout);
   } else if (fence->cl_event) {
      struct pipe_fence_handle *pipe_fence =
         driscreen->opencl_dri_event_get_fence(fence->cl_event);

      if (pipe_fence)
         return screen->fence_finish(screen, NULL, pipe_fence, timeout);
      else
         return driscreen->opencl_dri_event_wait(fence->cl_event, timeout);
   } else {
      assert(0);
      return false;
   }
}

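/* Make the GPU wait for the fence (a server-side wait); a no-op for
 * drivers without fence_server_sync support.
 */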
void
dri_server_wait_sync(__DRIcontext *_ctx, void *_fence, unsigned flags)
{
   struct st_context *st = dri_context(_ctx)->st;
   struct pipe_context *ctx = st->pipe;
   struct dri2_fence *fence = (struct dri2_fence*)_fence;

   /* We might be called here with a NULL fence as a result of WaitSyncKHR
    * on an EGL_KHR_reusable_sync fence. There is nothing to do in that case.
    */
   if (!fence)
      return;

   /* Wait for glthread to finish because we can't use pipe_context from
    * multiple threads.
    */
   _mesa_glthread_finish(st->ctx);

   if (ctx->fence_server_sync)
      ctx->fence_server_sync(ctx, fence->pipe_fence);
}

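/* The fence entry points handed to loaders via __DRI2_FENCE. A loader
 * would typically pair create/wait/destroy along these lines (a sketch;
 * the variable names are illustrative):
 *
 *    void *f = ext->create_fence(dri_ctx);
 *    ext->client_wait_sync(dri_ctx, f, 0, __DRI2_FENCE_TIMEOUT_INFINITE);
 *    ext->destroy_fence(dri_screen, f);
 */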
const __DRI2fenceExtension dri2FenceExtension = {
   .base = { __DRI2_FENCE, 2 },

   .create_fence = dri_create_fence,
   .get_fence_from_cl_event = dri_get_fence_from_cl_event,
   .destroy_fence = dri_destroy_fence,
   .client_wait_sync = dri_client_wait_sync,
   .server_wait_sync = dri_server_wait_sync,
   .get_capabilities = dri_fence_get_caps,
   .create_fence_fd = dri_create_fence_fd,
   .get_fence_fd = dri_get_fence_fd,
};

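/* Create a __DRIimage from a (non-multisampled) renderbuffer, as used by
 * eglCreateImage with EGL_GL_RENDERBUFFER.
 */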
__DRIimage *
dri_create_image_from_renderbuffer(__DRIcontext *context,
                                   int renderbuffer, void *loaderPrivate,
                                   unsigned *error)
{
   struct dri_context *dri_ctx = dri_context(context);
   struct st_context *st = dri_ctx->st;
   struct gl_context *ctx = st->ctx;
   struct pipe_context *p_ctx = st->pipe;
   struct gl_renderbuffer *rb;
   struct pipe_resource *tex;
   __DRIimage *img;

   /* Wait for glthread to finish to get up-to-date GL object lookups. */
   _mesa_glthread_finish(st->ctx);

   /* Section 3.9 (EGLImage Specification and Management) of the EGL 1.5
    * specification says:
    *
    *   "If target is EGL_GL_RENDERBUFFER and buffer is not the name of a
    *    renderbuffer object, or if buffer is the name of a multisampled
    *    renderbuffer object, the error EGL_BAD_PARAMETER is generated."
    *
    *   "If target is EGL_GL_TEXTURE_2D, EGL_GL_TEXTURE_CUBE_MAP_*,
    *    EGL_GL_RENDERBUFFER or EGL_GL_TEXTURE_3D and buffer refers to the
    *    default GL texture object (0) for the corresponding GL target, the
    *    error EGL_BAD_PARAMETER is generated."
    *
    * (We rely on _mesa_lookup_renderbuffer returning NULL in this case.)
    */
   rb = _mesa_lookup_renderbuffer(ctx, renderbuffer);
   if (!rb || rb->NumSamples > 0) {
      *error = __DRI_IMAGE_ERROR_BAD_PARAMETER;
      return NULL;
   }

   tex = rb->texture;
   if (!tex) {
      *error = __DRI_IMAGE_ERROR_BAD_PARAMETER;
      return NULL;
   }

   img = CALLOC_STRUCT(__DRIimageRec);
   if (!img) {
      *error = __DRI_IMAGE_ERROR_BAD_ALLOC;
      return NULL;
   }

   img->dri_format = tex->format;
   img->internal_format = rb->InternalFormat;
   img->loader_private = loaderPrivate;
   img->screen = dri_ctx->screen;
   img->in_fence_fd = -1;

   pipe_resource_reference(&img->texture, tex);

   /* If the resource supports EGL_MESA_image_dma_buf_export, make sure that
    * it's in a shareable state. Do this now while we still have access to
    * the context.
    */
   if (dri2_get_mapping_by_format(img->dri_format)) {
      p_ctx->flush_resource(p_ctx, tex);
      st_context_flush(st, 0, NULL, NULL, NULL);
   }

   ctx->Shared->HasExternallySharedImages = true;
   *error = __DRI_IMAGE_ERROR_SUCCESS;
   return img;
}

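/* Destroy a __DRIimage, notifying the loader (when it is new enough to
 * track per-image state) and releasing the texture and any in-fence fd.
 */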
void
dri2_destroy_image(__DRIimage *img)
{
   const __DRIimageLoaderExtension *imgLoader = img->screen->image.loader;
   const __DRIdri2LoaderExtension *dri2Loader = img->screen->dri2.loader;

   if (imgLoader && imgLoader->base.version >= 4 &&
       imgLoader->destroyLoaderImageState) {
      imgLoader->destroyLoaderImageState(img->loader_private);
   } else if (dri2Loader && dri2Loader->base.version >= 5 &&
              dri2Loader->destroyLoaderImageState) {
      dri2Loader->destroyLoaderImageState(img->loader_private);
   }

   pipe_resource_reference(&img->texture, NULL);

   if (img->in_fence_fd != -1)
      close(img->in_fence_fd);

   FREE(img);
}

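/* Create a __DRIimage from a GL texture level/layer (for cube maps,
 * 'depth' selects the face), validating the target, completeness and
 * level/depth ranges first.
 */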
__DRIimage *
dri2_create_from_texture(__DRIcontext *context, int target, unsigned texture,
                         int depth, int level, unsigned *error,
                         void *loaderPrivate)
{
   __DRIimage *img;
   struct dri_context *dri_ctx = dri_context(context);
   struct st_context *st = dri_ctx->st;
   struct gl_context *ctx = st->ctx;
   struct pipe_context *p_ctx = st->pipe;
   struct gl_texture_object *obj;
   struct gl_texture_image *glimg;
   GLuint face = 0;

   /* Wait for glthread to finish to get up-to-date GL object lookups. */
   _mesa_glthread_finish(st->ctx);

   obj = _mesa_lookup_texture(ctx, texture);
   if (!obj || obj->Target != target) {
      *error = __DRI_IMAGE_ERROR_BAD_PARAMETER;
      return NULL;
   }

   if (target == GL_TEXTURE_CUBE_MAP)
      face = depth;

   _mesa_test_texobj_completeness(ctx, obj);
   if (!obj->_BaseComplete || (level > 0 && !obj->_MipmapComplete)) {
      *error = __DRI_IMAGE_ERROR_BAD_PARAMETER;
      return NULL;
   }

   if (level < obj->Attrib.BaseLevel || level > obj->_MaxLevel) {
      *error = __DRI_IMAGE_ERROR_BAD_MATCH;
      return NULL;
   }

   glimg = obj->Image[face][level];
   if (!glimg || !glimg->pt) {
      *error = __DRI_IMAGE_ERROR_BAD_PARAMETER;
      return NULL;
   }

   if (target == GL_TEXTURE_3D && glimg->Depth < depth) {
      *error = __DRI_IMAGE_ERROR_BAD_MATCH;
      return NULL;
   }

   img = CALLOC_STRUCT(__DRIimageRec);
   if (!img) {
      *error = __DRI_IMAGE_ERROR_BAD_ALLOC;
      return NULL;
   }

   img->level = level;
   img->layer = depth;
   img->in_fence_fd = -1;
   img->dri_format = glimg->pt->format;
   img->internal_format = glimg->InternalFormat;

   img->loader_private = loaderPrivate;
   img->screen = dri_ctx->screen;

   pipe_resource_reference(&img->texture, glimg->pt);

   /* If the resource supports EGL_MESA_image_dma_buf_export, make sure that
    * it's in a shareable state. Do this now while we still have access to
    * the context.
    */
   if (dri2_get_mapping_by_format(img->dri_format)) {
      p_ctx->flush_resource(p_ctx, glimg->pt);
      st_context_flush(st, 0, NULL, NULL, NULL);
   }

   ctx->Shared->HasExternallySharedImages = true;
   *error = __DRI_IMAGE_ERROR_SUCCESS;
   return img;
}

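/* Map DRM fourcc codes to DRI image formats, components and gallium pipe
 * formats. Each plane entry is { plane index, width shift, height shift,
 * plane format }, where the shifts give the chroma subsampling relative
 * to plane 0.
 */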
static const struct dri2_format_mapping dri2_format_table[] = {
   { DRM_FORMAT_ABGR16161616F, __DRI_IMAGE_FORMAT_ABGR16161616F,
     __DRI_IMAGE_COMPONENTS_RGBA, PIPE_FORMAT_R16G16B16A16_FLOAT, 1,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_ABGR16161616F } } },
   { DRM_FORMAT_XBGR16161616F, __DRI_IMAGE_FORMAT_XBGR16161616F,
     __DRI_IMAGE_COMPONENTS_RGB, PIPE_FORMAT_R16G16B16X16_FLOAT, 1,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_XBGR16161616F } } },
   { DRM_FORMAT_ABGR16161616, __DRI_IMAGE_FORMAT_ABGR16161616,
     __DRI_IMAGE_COMPONENTS_RGBA, PIPE_FORMAT_R16G16B16A16_UNORM, 1,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_ABGR16161616 } } },
   { DRM_FORMAT_XBGR16161616, __DRI_IMAGE_FORMAT_XBGR16161616,
     __DRI_IMAGE_COMPONENTS_RGB, PIPE_FORMAT_R16G16B16X16_UNORM, 1,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_XBGR16161616 } } },
   { DRM_FORMAT_ARGB2101010, __DRI_IMAGE_FORMAT_ARGB2101010,
     __DRI_IMAGE_COMPONENTS_RGBA, PIPE_FORMAT_B10G10R10A2_UNORM, 1,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_ARGB2101010 } } },
   { DRM_FORMAT_XRGB2101010, __DRI_IMAGE_FORMAT_XRGB2101010,
     __DRI_IMAGE_COMPONENTS_RGB, PIPE_FORMAT_B10G10R10X2_UNORM, 1,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_XRGB2101010 } } },
   { DRM_FORMAT_ABGR2101010, __DRI_IMAGE_FORMAT_ABGR2101010,
     __DRI_IMAGE_COMPONENTS_RGBA, PIPE_FORMAT_R10G10B10A2_UNORM, 1,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_ABGR2101010 } } },
   { DRM_FORMAT_XBGR2101010, __DRI_IMAGE_FORMAT_XBGR2101010,
     __DRI_IMAGE_COMPONENTS_RGB, PIPE_FORMAT_R10G10B10X2_UNORM, 1,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_XBGR2101010 } } },
   { DRM_FORMAT_ARGB8888, __DRI_IMAGE_FORMAT_ARGB8888,
     __DRI_IMAGE_COMPONENTS_RGBA, PIPE_FORMAT_BGRA8888_UNORM, 1,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_ARGB8888 } } },
   { DRM_FORMAT_ABGR8888, __DRI_IMAGE_FORMAT_ABGR8888,
     __DRI_IMAGE_COMPONENTS_RGBA, PIPE_FORMAT_RGBA8888_UNORM, 1,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_ABGR8888 } } },
   { __DRI_IMAGE_FOURCC_SARGB8888, __DRI_IMAGE_FORMAT_SARGB8,
     __DRI_IMAGE_COMPONENTS_RGBA, PIPE_FORMAT_BGRA8888_SRGB, 1,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_SARGB8 } } },
   { DRM_FORMAT_XRGB8888, __DRI_IMAGE_FORMAT_XRGB8888,
     __DRI_IMAGE_COMPONENTS_RGB, PIPE_FORMAT_BGRX8888_UNORM, 1,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_XRGB8888 } } },
   { DRM_FORMAT_XBGR8888, __DRI_IMAGE_FORMAT_XBGR8888,
     __DRI_IMAGE_COMPONENTS_RGB, PIPE_FORMAT_RGBX8888_UNORM, 1,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_XBGR8888 } } },
   { DRM_FORMAT_ARGB1555, __DRI_IMAGE_FORMAT_ARGB1555,
     __DRI_IMAGE_COMPONENTS_RGBA, PIPE_FORMAT_B5G5R5A1_UNORM, 1,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_ARGB1555 } } },
   { DRM_FORMAT_ABGR1555, __DRI_IMAGE_FORMAT_ABGR1555,
     __DRI_IMAGE_COMPONENTS_RGBA, PIPE_FORMAT_R5G5B5A1_UNORM, 1,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_ABGR1555 } } },
   { DRM_FORMAT_ARGB4444, __DRI_IMAGE_FORMAT_ARGB4444,
     __DRI_IMAGE_COMPONENTS_RGBA, PIPE_FORMAT_B4G4R4A4_UNORM, 1,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_ARGB4444 } } },
   { DRM_FORMAT_ABGR4444, __DRI_IMAGE_FORMAT_ABGR4444,
     __DRI_IMAGE_COMPONENTS_RGBA, PIPE_FORMAT_R4G4B4A4_UNORM, 1,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_ABGR4444 } } },
   { DRM_FORMAT_RGB565, __DRI_IMAGE_FORMAT_RGB565,
     __DRI_IMAGE_COMPONENTS_RGB, PIPE_FORMAT_B5G6R5_UNORM, 1,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_RGB565 } } },
   { DRM_FORMAT_R8, __DRI_IMAGE_FORMAT_R8,
     __DRI_IMAGE_COMPONENTS_R, PIPE_FORMAT_R8_UNORM, 1,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_R8 } } },
   { DRM_FORMAT_R16, __DRI_IMAGE_FORMAT_R16,
     __DRI_IMAGE_COMPONENTS_R, PIPE_FORMAT_R16_UNORM, 1,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_R16 } } },
   { DRM_FORMAT_GR88, __DRI_IMAGE_FORMAT_GR88,
     __DRI_IMAGE_COMPONENTS_RG, PIPE_FORMAT_RG88_UNORM, 1,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_GR88 } } },
   { DRM_FORMAT_GR1616, __DRI_IMAGE_FORMAT_GR1616,
     __DRI_IMAGE_COMPONENTS_RG, PIPE_FORMAT_RG1616_UNORM, 1,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_GR1616 } } },

   { DRM_FORMAT_YUV410, __DRI_IMAGE_FORMAT_NONE,
     __DRI_IMAGE_COMPONENTS_Y_U_V, PIPE_FORMAT_IYUV, 3,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_R8 },
       { 1, 2, 2, __DRI_IMAGE_FORMAT_R8 },
       { 2, 2, 2, __DRI_IMAGE_FORMAT_R8 } } },
   { DRM_FORMAT_YUV411, __DRI_IMAGE_FORMAT_NONE,
     __DRI_IMAGE_COMPONENTS_Y_U_V, PIPE_FORMAT_IYUV, 3,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_R8 },
       { 1, 2, 0, __DRI_IMAGE_FORMAT_R8 },
       { 2, 2, 0, __DRI_IMAGE_FORMAT_R8 } } },
   { DRM_FORMAT_YUV420, __DRI_IMAGE_FORMAT_NONE,
     __DRI_IMAGE_COMPONENTS_Y_U_V, PIPE_FORMAT_IYUV, 3,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_R8 },
       { 1, 1, 1, __DRI_IMAGE_FORMAT_R8 },
       { 2, 1, 1, __DRI_IMAGE_FORMAT_R8 } } },
   { DRM_FORMAT_YUV422, __DRI_IMAGE_FORMAT_NONE,
     __DRI_IMAGE_COMPONENTS_Y_U_V, PIPE_FORMAT_IYUV, 3,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_R8 },
       { 1, 1, 0, __DRI_IMAGE_FORMAT_R8 },
       { 2, 1, 0, __DRI_IMAGE_FORMAT_R8 } } },
   { DRM_FORMAT_YUV444, __DRI_IMAGE_FORMAT_NONE,
     __DRI_IMAGE_COMPONENTS_Y_U_V, PIPE_FORMAT_IYUV, 3,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_R8 },
       { 1, 0, 0, __DRI_IMAGE_FORMAT_R8 },
       { 2, 0, 0, __DRI_IMAGE_FORMAT_R8 } } },

   { DRM_FORMAT_YVU410, __DRI_IMAGE_FORMAT_NONE,
     __DRI_IMAGE_COMPONENTS_Y_U_V, PIPE_FORMAT_IYUV, 3,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_R8 },
       { 2, 2, 2, __DRI_IMAGE_FORMAT_R8 },
       { 1, 2, 2, __DRI_IMAGE_FORMAT_R8 } } },
   { DRM_FORMAT_YVU411, __DRI_IMAGE_FORMAT_NONE,
     __DRI_IMAGE_COMPONENTS_Y_U_V, PIPE_FORMAT_IYUV, 3,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_R8 },
       { 2, 2, 0, __DRI_IMAGE_FORMAT_R8 },
       { 1, 2, 0, __DRI_IMAGE_FORMAT_R8 } } },
   { DRM_FORMAT_YVU420, __DRI_IMAGE_FORMAT_NONE,
     __DRI_IMAGE_COMPONENTS_Y_U_V, PIPE_FORMAT_IYUV, 3,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_R8 },
       { 2, 1, 1, __DRI_IMAGE_FORMAT_R8 },
       { 1, 1, 1, __DRI_IMAGE_FORMAT_R8 } } },
   { DRM_FORMAT_YVU422, __DRI_IMAGE_FORMAT_NONE,
     __DRI_IMAGE_COMPONENTS_Y_U_V, PIPE_FORMAT_IYUV, 3,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_R8 },
       { 2, 1, 0, __DRI_IMAGE_FORMAT_R8 },
       { 1, 1, 0, __DRI_IMAGE_FORMAT_R8 } } },
   { DRM_FORMAT_YVU444, __DRI_IMAGE_FORMAT_NONE,
     __DRI_IMAGE_COMPONENTS_Y_U_V, PIPE_FORMAT_IYUV, 3,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_R8 },
       { 2, 0, 0, __DRI_IMAGE_FORMAT_R8 },
       { 1, 0, 0, __DRI_IMAGE_FORMAT_R8 } } },

   { DRM_FORMAT_NV12, __DRI_IMAGE_FORMAT_NONE,
     __DRI_IMAGE_COMPONENTS_Y_UV, PIPE_FORMAT_NV12, 2,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_R8 },
       { 1, 1, 1, __DRI_IMAGE_FORMAT_GR88 } } },
   { DRM_FORMAT_NV21, __DRI_IMAGE_FORMAT_NONE,
     __DRI_IMAGE_COMPONENTS_Y_UV, PIPE_FORMAT_NV21, 2,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_R8 },
       { 1, 1, 1, __DRI_IMAGE_FORMAT_GR88 } } },

   { DRM_FORMAT_P010, __DRI_IMAGE_FORMAT_NONE,
     __DRI_IMAGE_COMPONENTS_Y_UV, PIPE_FORMAT_P010, 2,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_R16 },
       { 1, 1, 1, __DRI_IMAGE_FORMAT_GR1616 } } },
   { DRM_FORMAT_P012, __DRI_IMAGE_FORMAT_NONE,
     __DRI_IMAGE_COMPONENTS_Y_UV, PIPE_FORMAT_P012, 2,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_R16 },
       { 1, 1, 1, __DRI_IMAGE_FORMAT_GR1616 } } },
   { DRM_FORMAT_P016, __DRI_IMAGE_FORMAT_NONE,
     __DRI_IMAGE_COMPONENTS_Y_UV, PIPE_FORMAT_P016, 2,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_R16 },
       { 1, 1, 1, __DRI_IMAGE_FORMAT_GR1616 } } },
   { DRM_FORMAT_P030, __DRI_IMAGE_FORMAT_NONE,
     __DRI_IMAGE_COMPONENTS_Y_UV, PIPE_FORMAT_P030, 2,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_R16 },
       { 1, 1, 1, __DRI_IMAGE_FORMAT_GR1616 } } },

   { DRM_FORMAT_NV16, __DRI_IMAGE_FORMAT_NONE,
     __DRI_IMAGE_COMPONENTS_Y_UV, PIPE_FORMAT_NV12, 2,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_R8 },
       { 1, 1, 0, __DRI_IMAGE_FORMAT_GR88 } } },

   { DRM_FORMAT_AYUV, __DRI_IMAGE_FORMAT_ABGR8888,
     __DRI_IMAGE_COMPONENTS_AYUV, PIPE_FORMAT_AYUV, 1,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_ABGR8888 } } },
   { DRM_FORMAT_XYUV8888, __DRI_IMAGE_FORMAT_XBGR8888,
     __DRI_IMAGE_COMPONENTS_XYUV, PIPE_FORMAT_XYUV, 1,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_XBGR8888 } } },

   { DRM_FORMAT_Y410, __DRI_IMAGE_FORMAT_ABGR2101010,
     __DRI_IMAGE_COMPONENTS_AYUV, PIPE_FORMAT_Y410, 1,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_ABGR2101010 } } },

   /* Y412 is an unusual format. It has the same layout as Y416 (i.e.,
    * 16 bits of physical storage per channel), but the low 4 bits of each
    * component are unused padding. The writer is supposed to write zeros
    * to these bits.
    */
   { DRM_FORMAT_Y412, __DRI_IMAGE_FORMAT_ABGR16161616,
     __DRI_IMAGE_COMPONENTS_AYUV, PIPE_FORMAT_Y412, 1,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_ABGR16161616 } } },
   { DRM_FORMAT_Y416, __DRI_IMAGE_FORMAT_ABGR16161616,
     __DRI_IMAGE_COMPONENTS_AYUV, PIPE_FORMAT_Y416, 1,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_ABGR16161616 } } },

   /* For YUYV and UYVY buffers, we set up two overlapping DRI images
    * and treat them as planar buffers in the compositors.
    * Plane 0 is GR88 and samples YU or YV pairs and places Y into
    * the R component, while plane 1 is ARGB/ABGR and samples YUYV/UYVY
    * clusters and places U into the G component and V into the A
    * component. This lets the texture sampler interpolate the Y
    * components correctly when sampling from plane 0, and interpolate
    * U and V correctly when sampling from plane 1.
    */
   { DRM_FORMAT_YUYV, __DRI_IMAGE_FORMAT_NONE,
     __DRI_IMAGE_COMPONENTS_Y_XUXV, PIPE_FORMAT_YUYV, 2,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_GR88 },
       { 0, 1, 0, __DRI_IMAGE_FORMAT_ARGB8888 } } },
   { DRM_FORMAT_YVYU, __DRI_IMAGE_FORMAT_NONE,
     __DRI_IMAGE_COMPONENTS_Y_XUXV, PIPE_FORMAT_YVYU, 2,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_GR88 },
       { 0, 1, 0, __DRI_IMAGE_FORMAT_ARGB8888 } } },
   { DRM_FORMAT_UYVY, __DRI_IMAGE_FORMAT_NONE,
     __DRI_IMAGE_COMPONENTS_Y_UXVX, PIPE_FORMAT_UYVY, 2,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_GR88 },
       { 0, 1, 0, __DRI_IMAGE_FORMAT_ABGR8888 } } },
   { DRM_FORMAT_VYUY, __DRI_IMAGE_FORMAT_NONE,
     __DRI_IMAGE_COMPONENTS_Y_UXVX, PIPE_FORMAT_VYUY, 2,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_GR88 },
       { 0, 1, 0, __DRI_IMAGE_FORMAT_ABGR8888 } } },

   /* The Y21x formats work in a similar fashion to the YUYV and UYVY
    * formats.
    */
   { DRM_FORMAT_Y210, __DRI_IMAGE_FORMAT_NONE,
     __DRI_IMAGE_COMPONENTS_Y_XUXV, PIPE_FORMAT_Y210, 2,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_GR1616 },
       { 0, 1, 0, __DRI_IMAGE_FORMAT_ABGR16161616 } } },
   /* Y212 is an unusual format. It has the same layout as Y216 (i.e.,
    * 16 bits of physical storage per channel), but the low 4 bits of each
    * component are unused padding. The writer is supposed to write zeros
    * to these bits.
    */
   { DRM_FORMAT_Y212, __DRI_IMAGE_FORMAT_NONE,
     __DRI_IMAGE_COMPONENTS_Y_XUXV, PIPE_FORMAT_Y212, 2,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_GR1616 },
       { 0, 1, 0, __DRI_IMAGE_FORMAT_ABGR16161616 } } },
   { DRM_FORMAT_Y216, __DRI_IMAGE_FORMAT_NONE,
     __DRI_IMAGE_COMPONENTS_Y_XUXV, PIPE_FORMAT_Y216, 2,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_GR1616 },
       { 0, 1, 0, __DRI_IMAGE_FORMAT_ABGR16161616 } } },
};

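/* Look up a format mapping by its DRM fourcc code. */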
const struct dri2_format_mapping *
dri2_get_mapping_by_fourcc(int fourcc)
{
   for (unsigned i = 0; i < ARRAY_SIZE(dri2_format_table); i++) {
      if (dri2_format_table[i].dri_fourcc == fourcc)
         return &dri2_format_table[i];
   }

   return NULL;
}

const struct dri2_format_mapping *
dri2_get_mapping_by_format(int format)
{
   if (format == __DRI_IMAGE_FORMAT_NONE)
      return NULL;

   for (unsigned i = 0; i < ARRAY_SIZE(dri2_format_table); i++) {
      if (dri2_format_table[i].dri_format == format)
         return &dri2_format_table[i];
   }

   return NULL;
}

enum pipe_format
dri2_get_pipe_format_for_dri_format(int format)
{
   for (unsigned i = 0; i < ARRAY_SIZE(dri2_format_table); i++) {
      if (dri2_format_table[i].dri_format == format)
         return dri2_format_table[i].pipe_format;
   }

   return PIPE_FORMAT_NONE;
}

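/* Check that every plane of a planar YUV mapping can be sampled from by
 * the driver; this lets a fourcc be supported via per-plane sampling even
 * when the combined pipe format is not directly supported.
 */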
bool
dri2_yuv_dma_buf_supported(struct dri_screen *screen,
                           const struct dri2_format_mapping *map)
{
   struct pipe_screen *pscreen = screen->base.screen;

   for (unsigned i = 0; i < map->nplanes; i++) {
      if (!pscreen->is_format_supported(pscreen,
            dri2_get_pipe_format_for_dri_format(map->planes[i].dri_format),
            screen->target, 0, 0, PIPE_BIND_SAMPLER_VIEW))
         return false;
   }
   return true;
}

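/* Enumerate the dma-buf fourccs the driver supports. With max == 0 no
 * fourccs are written and *count receives the total; otherwise up to
 * 'max' fourccs are stored in 'formats' and *count receives how many.
 */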
bool
dri_query_dma_buf_formats(__DRIscreen *_screen, int max, int *formats,
                          int *count)
{
   struct dri_screen *screen = dri_screen(_screen);
   struct pipe_screen *pscreen = screen->base.screen;
   int i, j;

   for (i = 0, j = 0; (i < ARRAY_SIZE(dri2_format_table)) &&
        (j < max || max == 0); i++) {
      const struct dri2_format_mapping *map = &dri2_format_table[i];

      /* The sRGB format is not a real FourCC as defined by drm_fourcc.h,
       * so we must not leak it out to clients.
       */
      if (dri2_format_table[i].dri_fourcc == __DRI_IMAGE_FOURCC_SARGB8888)
         continue;

      if (pscreen->is_format_supported(pscreen, map->pipe_format,
                                       screen->target, 0, 0,
                                       PIPE_BIND_RENDER_TARGET) ||
          pscreen->is_format_supported(pscreen, map->pipe_format,
                                       screen->target, 0, 0,
                                       PIPE_BIND_SAMPLER_VIEW) ||
          dri2_yuv_dma_buf_supported(screen, map)) {
         if (j < max)
            formats[j] = map->dri_fourcc;
         j++;
      }
   }
   *count = j;
   return true;
}

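/* Thin wrapper around dri_create_image() that rejects a modifier list
 * consisting solely of DRM_FORMAT_MOD_INVALID.
 */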
__DRIimage *
dri_create_image_with_modifiers(__DRIscreen *screen,
                                uint32_t width, uint32_t height,
                                uint32_t dri_format, uint32_t dri_usage,
                                const uint64_t *modifiers,
                                unsigned int modifiers_count,
                                void *loaderPrivate)
{
   if (modifiers && modifiers_count > 0) {
      bool has_valid_modifier = false;
      int i;

      /* It's acceptable to create an image with INVALID modifier in the
       * list, but it cannot be the only modifier (since it will certainly
       * fail later). While we could easily catch this after image creation,
       * doing the check here is a convenient debug check likely pointing at
       * whatever interface the client is using to build its modifier list.
       */
      for (i = 0; i < modifiers_count; i++) {
         if (modifiers[i] != DRM_FORMAT_MOD_INVALID) {
            has_valid_modifier = true;
            break;
         }
      }
      if (!has_valid_modifier)
         return NULL;
   }

   return dri_create_image(screen, width, height, dri_format,
                           modifiers, modifiers_count, dri_usage,
                           loaderPrivate);
}

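/* If the image carries an in-fence fd, make the context wait for it
 * server-side before accessing the image, then consume and close the fd.
 */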
void
dri_image_fence_sync(struct dri_context *ctx, __DRIimage *img)
{
   struct pipe_context *pipe = ctx->st->pipe;
   struct pipe_fence_handle *fence;
   int fd = img->in_fence_fd;

   if (fd == -1)
      return;

   validate_fence_fd(fd);

   img->in_fence_fd = -1;

   pipe->create_fence_fd(pipe, &fence, fd, PIPE_FD_TYPE_NATIVE_SYNC);
   pipe->fence_server_sync(pipe, fence);
   pipe->screen->fence_reference(pipe->screen, &fence, NULL);

   close(fd);
}
/* vim: set sw=3 ts=8 sts=3 expandtab: */