/**************************************************************************
 *
 * Copyright 2010 Thomas Balling Sørensen & Orasanu Lucian.
 * Copyright 2014 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "pipe/p_video_codec.h"

#include "util/u_handle_table.h"
#include "util/u_video.h"
#include "util/u_memory.h"
#include "util/set.h"

#include "util/vl_vlc.h"
#include "vl/vl_winsys.h"

#include "va_private.h"

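/* Move a surface from its previous context's surface set to the new context's
 * set, destroying a leftover postproc fence when the old context can no longer
 * do it later. */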
void
vlVaSetSurfaceContext(vlVaDriver *drv, vlVaSurface *surf, vlVaContext *context)
{
   if (surf->ctx == context)
      return;

   if (surf->ctx) {
      assert(_mesa_set_search(surf->ctx->surfaces, surf));
      _mesa_set_remove_key(surf->ctx->surfaces, surf);

      /* Only drivers supporting PIPE_VIDEO_ENTRYPOINT_PROCESSING create a
       * decoder for the postproc context and are thus able to wait on and
       * destroy the surface fence. On other drivers we need to destroy the
       * fence here, otherwise vaQuerySurfaceStatus/vaSyncSurface will fail
       * and we would also potentially leak the fence.
       */
      if (surf->fence && !context->decoder &&
          context->templat.entrypoint == PIPE_VIDEO_ENTRYPOINT_PROCESSING &&
          surf->ctx->decoder && surf->ctx->decoder->destroy_fence &&
          !drv->pipe->screen->get_video_param(drv->pipe->screen,
                                              PIPE_VIDEO_PROFILE_UNKNOWN,
                                              PIPE_VIDEO_ENTRYPOINT_PROCESSING,
                                              PIPE_VIDEO_CAP_SUPPORTED)) {
         surf->ctx->decoder->destroy_fence(surf->ctx->decoder, surf->fence);
         surf->fence = NULL;
      }
   }

   surf->ctx = context;
   _mesa_set_add(surf->ctx->surfaces, surf);
}

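/* VA-API vaBeginPicture(): get ready to decode/encode/process a picture into
 * render_target and reset the per-picture state kept in the context. */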
VAStatus
vlVaBeginPicture(VADriverContextP ctx, VAContextID context_id, VASurfaceID render_target)
{
   vlVaDriver *drv;
   vlVaContext *context;
   vlVaSurface *surf;

   if (!ctx)
      return VA_STATUS_ERROR_INVALID_CONTEXT;

   drv = VL_VA_DRIVER(ctx);
   if (!drv)
      return VA_STATUS_ERROR_INVALID_CONTEXT;

   mtx_lock(&drv->mutex);
   context = handle_table_get(drv->htab, context_id);
   if (!context) {
      mtx_unlock(&drv->mutex);
      return VA_STATUS_ERROR_INVALID_CONTEXT;
   }

   if (u_reduce_video_profile(context->templat.profile) == PIPE_VIDEO_FORMAT_MPEG12) {
      context->desc.mpeg12.intra_matrix = NULL;
      context->desc.mpeg12.non_intra_matrix = NULL;
   }

   surf = handle_table_get(drv->htab, render_target);
   if (surf)
      vlVaGetSurfaceBuffer(drv, surf);
   if (!surf || !surf->buffer) {
      mtx_unlock(&drv->mutex);
      return VA_STATUS_ERROR_INVALID_SURFACE;
   }

   context->target_id = render_target;
   vlVaSetSurfaceContext(drv, surf, context);
   context->target = surf->buffer;
   context->mjpeg.sampling_factor = 0;

   if (!context->decoder) {

      /* VPP */
      if (context->templat.profile == PIPE_VIDEO_PROFILE_UNKNOWN &&
          context->target->buffer_format != PIPE_FORMAT_B8G8R8A8_UNORM &&
          context->target->buffer_format != PIPE_FORMAT_R8G8B8A8_UNORM &&
          context->target->buffer_format != PIPE_FORMAT_B8G8R8X8_UNORM &&
          context->target->buffer_format != PIPE_FORMAT_R8G8B8X8_UNORM &&
          context->target->buffer_format != PIPE_FORMAT_B10G10R10A2_UNORM &&
          context->target->buffer_format != PIPE_FORMAT_R10G10B10A2_UNORM &&
          context->target->buffer_format != PIPE_FORMAT_B10G10R10X2_UNORM &&
          context->target->buffer_format != PIPE_FORMAT_R10G10B10X2_UNORM &&
          context->target->buffer_format != PIPE_FORMAT_NV12 &&
          context->target->buffer_format != PIPE_FORMAT_P010 &&
          context->target->buffer_format != PIPE_FORMAT_P016) {
         mtx_unlock(&drv->mutex);
         return VA_STATUS_ERROR_UNIMPLEMENTED;
      }

      if (drv->pipe->screen->get_video_param(drv->pipe->screen,
                                             PIPE_VIDEO_PROFILE_UNKNOWN,
                                             PIPE_VIDEO_ENTRYPOINT_PROCESSING,
                                             PIPE_VIDEO_CAP_SUPPORTED)) {
         context->needs_begin_frame = true;
      }

      mtx_unlock(&drv->mutex);
      return VA_STATUS_SUCCESS;
   }

   if (context->decoder->entrypoint != PIPE_VIDEO_ENTRYPOINT_ENCODE)
      context->needs_begin_frame = true;

   /* Metadata and SEIs are specified on a per-picture basis and need to be
    * cleared before rendering the picture. */
   if (context->decoder->entrypoint == PIPE_VIDEO_ENTRYPOINT_ENCODE) {
      switch (u_reduce_video_profile(context->templat.profile)) {
      case PIPE_VIDEO_FORMAT_AV1:
         context->desc.av1enc.metadata_flags.value = 0;
         context->desc.av1enc.roi.num = 0;
         context->desc.av1enc.intra_refresh.mode = INTRA_REFRESH_MODE_NONE;
         break;
      case PIPE_VIDEO_FORMAT_HEVC:
         context->desc.h265enc.roi.num = 0;
         context->desc.h265enc.intra_refresh.mode = INTRA_REFRESH_MODE_NONE;
         break;
      case PIPE_VIDEO_FORMAT_MPEG4_AVC:
         context->desc.h264enc.roi.num = 0;
         context->desc.h264enc.intra_refresh.mode = INTRA_REFRESH_MODE_NONE;
         break;
      default:
         break;
      }
   }

   context->slice_data_offset = 0;
   context->have_slice_params = false;

   mtx_unlock(&drv->mutex);
   return VA_STATUS_SUCCESS;
}

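/* Resolve a VASurfaceID to the pipe_video_buffer the decoder should use as a
 * reference frame; *ref_frame is set to NULL if the surface is unknown. */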
void
vlVaGetReferenceFrame(vlVaDriver *drv, VASurfaceID surface_id,
                      struct pipe_video_buffer **ref_frame)
{
   vlVaSurface *surf = handle_table_get(drv->htab, surface_id);
   if (surf)
      *ref_frame = vlVaGetSurfaceBuffer(drv, surf);
   else
      *ref_frame = NULL;
}
/*
 * in->quality == 0: without any settings it uses the speed preset,
 *                   no pre-encoding and no vbaq. It is the fastest setting.
 * in->quality == 1: suggested setting, with the balanced preset,
 *                   pre-encoding and vbaq.
 * in->quality == others: a customized setting with the valid bit
 *                   (bit #0) set to "1", for example:
 *
 *                   0x3  (balanced preset, no pre-encoding, no vbaq)
 *                   0x13 (balanced preset, no pre-encoding, vbaq)
 *                   0x9  (speed preset, pre-encoding, no vbaq)
 *                   0x19 (speed preset, pre-encoding, vbaq)
 *
 * The quality value has to be treated as a combination of preset mode,
 * pre-encoding and vbaq settings. Quality and speed can vary according
 * to these settings.
 */
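/* For reference: applications typically pass this value through
 * VAEncMiscParameterTypeQualityLevel (the libva
 * VAEncMiscParameterBufferQualityLevel::quality_level field); following the
 * examples above, quality_level = 0x19 would request the speed preset with
 * pre-encoding and vbaq enabled. */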
void
vlVaHandleVAEncMiscParameterTypeQualityLevel(struct pipe_enc_quality_modes *p, vlVaQualityBits *in)
{
   if (!in->quality) {
      p->level = 0;
      p->preset_mode = PRESET_MODE_SPEED;
      p->pre_encode_mode = PREENCODING_MODE_DISABLE;
      p->vbaq_mode = VBAQ_DISABLE;

      return;
   }

   if (p->level != in->quality) {
      if (in->quality == 1) {
         p->preset_mode = PRESET_MODE_BALANCE;
         p->pre_encode_mode = PREENCODING_MODE_DEFAULT;
         p->vbaq_mode = VBAQ_AUTO;
      } else {
         p->preset_mode = in->preset_mode > PRESET_MODE_HIGH_QUALITY
                          ? PRESET_MODE_HIGH_QUALITY : in->preset_mode;
         p->pre_encode_mode = in->pre_encode_mode;
         p->vbaq_mode = in->vbaq_mode;
      }
   }
   p->level = in->quality;
}

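/* Handle VAPictureParameterBufferType: forward the picture parameters to the
 * codec-specific handler and create the decoder once max_references is known. */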
static VAStatus
handlePictureParameterBuffer(vlVaDriver *drv, vlVaContext *context, vlVaBuffer *buf)
{
   VAStatus vaStatus = VA_STATUS_SUCCESS;
   enum pipe_video_format format =
      u_reduce_video_profile(context->templat.profile);

   switch (format) {
   case PIPE_VIDEO_FORMAT_MPEG12:
      vlVaHandlePictureParameterBufferMPEG12(drv, context, buf);
      break;

   case PIPE_VIDEO_FORMAT_MPEG4_AVC:
      vlVaHandlePictureParameterBufferH264(drv, context, buf);
      break;

   case PIPE_VIDEO_FORMAT_VC1:
      vlVaHandlePictureParameterBufferVC1(drv, context, buf);
      break;

   case PIPE_VIDEO_FORMAT_MPEG4:
      vlVaHandlePictureParameterBufferMPEG4(drv, context, buf);
      break;

   case PIPE_VIDEO_FORMAT_HEVC:
      vlVaHandlePictureParameterBufferHEVC(drv, context, buf);
      break;

   case PIPE_VIDEO_FORMAT_JPEG:
      vlVaHandlePictureParameterBufferMJPEG(drv, context, buf);
      break;

   case PIPE_VIDEO_FORMAT_VP9:
      vlVaHandlePictureParameterBufferVP9(drv, context, buf);
      break;

   case PIPE_VIDEO_FORMAT_AV1:
      vlVaHandlePictureParameterBufferAV1(drv, context, buf);
      break;

   default:
      break;
   }

   /* Create the decoder once max_references is known. */
   if (!context->decoder) {
      if (!context->target)
         return VA_STATUS_ERROR_INVALID_CONTEXT;

      if (format == PIPE_VIDEO_FORMAT_MPEG4_AVC)
         context->templat.level = u_get_h264_level(context->templat.width,
            context->templat.height, &context->templat.max_references);

      context->decoder = drv->pipe->create_video_codec(drv->pipe,
         &context->templat);

      if (!context->decoder)
         return VA_STATUS_ERROR_ALLOCATION_FAILED;

      context->needs_begin_frame = true;
   }

   if (format == PIPE_VIDEO_FORMAT_VP9) {
      context->decoder->width =
         context->desc.vp9.picture_parameter.frame_width;
      context->decoder->height =
         context->desc.vp9.picture_parameter.frame_height;
   }

   return vaStatus;
}

static void
handleIQMatrixBuffer(vlVaContext *context, vlVaBuffer *buf)
{
   switch (u_reduce_video_profile(context->templat.profile)) {
   case PIPE_VIDEO_FORMAT_MPEG12:
      vlVaHandleIQMatrixBufferMPEG12(context, buf);
      break;

   case PIPE_VIDEO_FORMAT_MPEG4_AVC:
      vlVaHandleIQMatrixBufferH264(context, buf);
      break;

   case PIPE_VIDEO_FORMAT_MPEG4:
      vlVaHandleIQMatrixBufferMPEG4(context, buf);
      break;

   case PIPE_VIDEO_FORMAT_HEVC:
      vlVaHandleIQMatrixBufferHEVC(context, buf);
      break;

   case PIPE_VIDEO_FORMAT_JPEG:
      vlVaHandleIQMatrixBufferMJPEG(context, buf);
      break;

   default:
      break;
   }
}

static void
handleSliceParameterBuffer(vlVaContext *context, vlVaBuffer *buf)
{
   switch (u_reduce_video_profile(context->templat.profile)) {
   case PIPE_VIDEO_FORMAT_MPEG12:
      vlVaHandleSliceParameterBufferMPEG12(context, buf);
      break;

   case PIPE_VIDEO_FORMAT_VC1:
      vlVaHandleSliceParameterBufferVC1(context, buf);
      break;

   case PIPE_VIDEO_FORMAT_MPEG4_AVC:
      vlVaHandleSliceParameterBufferH264(context, buf);
      break;

   case PIPE_VIDEO_FORMAT_MPEG4:
      vlVaHandleSliceParameterBufferMPEG4(context, buf);
      break;

   case PIPE_VIDEO_FORMAT_HEVC:
      vlVaHandleSliceParameterBufferHEVC(context, buf);
      break;

   case PIPE_VIDEO_FORMAT_JPEG:
      vlVaHandleSliceParameterBufferMJPEG(context, buf);
      break;

   case PIPE_VIDEO_FORMAT_VP9:
      vlVaHandleSliceParameterBufferVP9(context, buf);
      break;

   case PIPE_VIDEO_FORMAT_AV1:
      vlVaHandleSliceParameterBufferAV1(context, buf);
      break;

   default:
      break;
   }
}

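/* Return 1 if the given startcode (of width 'bits') appears within the first
 * 64 bytes of the buffer, 0 otherwise. */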
static unsigned int
bufHasStartcode(vlVaBuffer *buf, unsigned int code, unsigned int bits)
{
   struct vl_vlc vlc = {0};
   int i;

   /* search the first 64 bytes for a startcode */
   vl_vlc_init(&vlc, 1, (const void * const*)&buf->data, &buf->size);
   for (i = 0; i < 64 && vl_vlc_bits_left(&vlc) >= bits; ++i) {
      if (vl_vlc_peekbits(&vlc, bits) == code)
         return 1;
      vl_vlc_eatbits(&vlc, 8);
      vl_vlc_fillbits(&vlc);
   }

   return 0;
}

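/* Handle VAProtectedSliceDataBufferType: copy the DRM decryption key carried
 * in the buffer into the picture description and flag protected playback. */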
static void
handleVAProtectedSliceDataBufferType(vlVaContext *context, vlVaBuffer *buf)
{
   uint8_t *encrypted_data = (uint8_t *)buf->data;
   uint8_t *drm_key;

   unsigned int drm_key_size = buf->size;

   drm_key = REALLOC(context->desc.base.decrypt_key,
                     context->desc.base.key_size, drm_key_size);
   if (!drm_key)
      return;
   context->desc.base.decrypt_key = drm_key;
   memcpy(context->desc.base.decrypt_key, encrypted_data, drm_key_size);
   context->desc.base.key_size = drm_key_size;
   context->desc.base.protected_playback = true;
}

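/* Handle VASliceDataBufferType: queue the slice data for the decoder,
 * prepending a start code (or codec-specific header) when the application
 * did not include one, and appending an EOI marker for JPEG. */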
static VAStatus
handleVASliceDataBufferType(vlVaContext *context, vlVaBuffer *buf)
{
   enum pipe_video_format format = u_reduce_video_profile(context->templat.profile);
   static const uint8_t start_code_h264[] = { 0x00, 0x00, 0x01 };
   static const uint8_t start_code_h265[] = { 0x00, 0x00, 0x01 };
   static const uint8_t start_code_vc1[] = { 0x00, 0x00, 0x01, 0x0d };
   static const uint8_t eoi_jpeg[] = { 0xff, 0xd9 };

   if (!context->decoder)
      return VA_STATUS_ERROR_INVALID_CONTEXT;

   if (context->bs.allocated_size - context->bs.num_buffers < 3) {
      context->bs.buffers = REALLOC(context->bs.buffers,
                                    context->bs.allocated_size * sizeof(*context->bs.buffers),
                                    (context->bs.allocated_size + 3) * sizeof(*context->bs.buffers));
      context->bs.sizes = REALLOC(context->bs.sizes,
                                  context->bs.allocated_size * sizeof(*context->bs.sizes),
                                  (context->bs.allocated_size + 3) * sizeof(*context->bs.sizes));
      context->bs.allocated_size += 3;
   }

   format = u_reduce_video_profile(context->templat.profile);
   if (!context->desc.base.protected_playback) {
      switch (format) {
      case PIPE_VIDEO_FORMAT_MPEG4_AVC:
         if (bufHasStartcode(buf, 0x000001, 24))
            break;

         context->bs.buffers[context->bs.num_buffers] = (void *const)&start_code_h264;
         context->bs.sizes[context->bs.num_buffers++] = sizeof(start_code_h264);
         break;
      case PIPE_VIDEO_FORMAT_HEVC:
         if (bufHasStartcode(buf, 0x000001, 24))
            break;

         context->bs.buffers[context->bs.num_buffers] = (void *const)&start_code_h265;
         context->bs.sizes[context->bs.num_buffers++] = sizeof(start_code_h265);
         break;
      case PIPE_VIDEO_FORMAT_VC1:
         if (bufHasStartcode(buf, 0x0000010d, 32) ||
             bufHasStartcode(buf, 0x0000010c, 32) ||
             bufHasStartcode(buf, 0x0000010b, 32))
            break;

         if (context->decoder->profile == PIPE_VIDEO_PROFILE_VC1_ADVANCED) {
            context->bs.buffers[context->bs.num_buffers] = (void *const)&start_code_vc1;
            context->bs.sizes[context->bs.num_buffers++] = sizeof(start_code_vc1);
         }
         break;
      case PIPE_VIDEO_FORMAT_MPEG4:
         if (bufHasStartcode(buf, 0x000001, 24))
            break;

         vlVaDecoderFixMPEG4Startcode(context);
         context->bs.buffers[context->bs.num_buffers] = (void *)context->mpeg4.start_code;
         context->bs.sizes[context->bs.num_buffers++] = context->mpeg4.start_code_size;
         break;
      case PIPE_VIDEO_FORMAT_JPEG:
         if (bufHasStartcode(buf, 0xffd8ffdb, 32))
            break;

         vlVaGetJpegSliceHeader(context);
         context->bs.buffers[context->bs.num_buffers] = (void *)context->mjpeg.slice_header;
         context->bs.sizes[context->bs.num_buffers++] = context->mjpeg.slice_header_size;
         break;
      case PIPE_VIDEO_FORMAT_VP9:
         if (false == context->desc.base.protected_playback)
            vlVaDecoderVP9BitstreamHeader(context, buf);
         break;
      case PIPE_VIDEO_FORMAT_AV1:
         break;
      default:
         break;
      }
   }

   context->bs.buffers[context->bs.num_buffers] = buf->data;
   context->bs.sizes[context->bs.num_buffers++] = buf->size;

   if (format == PIPE_VIDEO_FORMAT_JPEG) {
      context->bs.buffers[context->bs.num_buffers] = (void *const)&eoi_jpeg;
      context->bs.sizes[context->bs.num_buffers++] = sizeof(eoi_jpeg);
   }

   if (context->needs_begin_frame) {
      context->decoder->begin_frame(context->decoder, context->target,
                                    &context->desc.base);
      context->needs_begin_frame = false;
   }
   return VA_STATUS_SUCCESS;
}

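/* The helpers below dispatch encoder parameter buffers (rate control, frame
 * rate, temporal layers, sequence parameters, quality level, frame size, ...)
 * to the codec-specific implementations. */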
static VAStatus
handleVAEncMiscParameterTypeRateControl(vlVaContext *context, VAEncMiscParameterBuffer *misc)
{
   VAStatus status = VA_STATUS_SUCCESS;

   switch (u_reduce_video_profile(context->templat.profile)) {
   case PIPE_VIDEO_FORMAT_MPEG4_AVC:
      status = vlVaHandleVAEncMiscParameterTypeRateControlH264(context, misc);
      break;

   case PIPE_VIDEO_FORMAT_HEVC:
      status = vlVaHandleVAEncMiscParameterTypeRateControlHEVC(context, misc);
      break;

#if VA_CHECK_VERSION(1, 16, 0)
   case PIPE_VIDEO_FORMAT_AV1:
      status = vlVaHandleVAEncMiscParameterTypeRateControlAV1(context, misc);
      break;
#endif
   default:
      break;
   }

   return status;
}

static VAStatus
handleVAEncMiscParameterTypeFrameRate(vlVaContext *context, VAEncMiscParameterBuffer *misc)
{
   VAStatus status = VA_STATUS_SUCCESS;

   switch (u_reduce_video_profile(context->templat.profile)) {
   case PIPE_VIDEO_FORMAT_MPEG4_AVC:
      status = vlVaHandleVAEncMiscParameterTypeFrameRateH264(context, misc);
      break;

   case PIPE_VIDEO_FORMAT_HEVC:
      status = vlVaHandleVAEncMiscParameterTypeFrameRateHEVC(context, misc);
      break;

#if VA_CHECK_VERSION(1, 16, 0)
   case PIPE_VIDEO_FORMAT_AV1:
      status = vlVaHandleVAEncMiscParameterTypeFrameRateAV1(context, misc);
      break;
#endif
   default:
      break;
   }

   return status;
}

static VAStatus
handleVAEncMiscParameterTypeTemporalLayer(vlVaContext *context, VAEncMiscParameterBuffer *misc)
{
   VAStatus status = VA_STATUS_SUCCESS;

   switch (u_reduce_video_profile(context->templat.profile)) {
   case PIPE_VIDEO_FORMAT_MPEG4_AVC:
      status = vlVaHandleVAEncMiscParameterTypeTemporalLayerH264(context, misc);
      break;

   case PIPE_VIDEO_FORMAT_HEVC:
      status = vlVaHandleVAEncMiscParameterTypeTemporalLayerHEVC(context, misc);
      break;

   default:
      break;
   }

   return status;
}

static VAStatus
handleVAEncSequenceParameterBufferType(vlVaDriver *drv, vlVaContext *context, vlVaBuffer *buf)
{
   VAStatus status = VA_STATUS_SUCCESS;

   switch (u_reduce_video_profile(context->templat.profile)) {
   case PIPE_VIDEO_FORMAT_MPEG4_AVC:
      status = vlVaHandleVAEncSequenceParameterBufferTypeH264(drv, context, buf);
      break;

   case PIPE_VIDEO_FORMAT_HEVC:
      status = vlVaHandleVAEncSequenceParameterBufferTypeHEVC(drv, context, buf);
      break;

#if VA_CHECK_VERSION(1, 16, 0)
   case PIPE_VIDEO_FORMAT_AV1:
      status = vlVaHandleVAEncSequenceParameterBufferTypeAV1(drv, context, buf);
      break;
#endif

   default:
      break;
   }

   return status;
}

static VAStatus
handleVAEncMiscParameterTypeQualityLevel(vlVaContext *context, VAEncMiscParameterBuffer *misc)
{
   VAStatus status = VA_STATUS_SUCCESS;

   switch (u_reduce_video_profile(context->templat.profile)) {
   case PIPE_VIDEO_FORMAT_MPEG4_AVC:
      status = vlVaHandleVAEncMiscParameterTypeQualityLevelH264(context, misc);
      break;

   case PIPE_VIDEO_FORMAT_HEVC:
      status = vlVaHandleVAEncMiscParameterTypeQualityLevelHEVC(context, misc);
      break;

#if VA_CHECK_VERSION(1, 16, 0)
   case PIPE_VIDEO_FORMAT_AV1:
      status = vlVaHandleVAEncMiscParameterTypeQualityLevelAV1(context, misc);
      break;
#endif

   default:
      break;
   }

   return status;
}

static VAStatus
handleVAEncMiscParameterTypeMaxFrameSize(vlVaContext *context, VAEncMiscParameterBuffer *misc)
{
   VAStatus status = VA_STATUS_SUCCESS;

   switch (u_reduce_video_profile(context->templat.profile)) {
   case PIPE_VIDEO_FORMAT_MPEG4_AVC:
      status = vlVaHandleVAEncMiscParameterTypeMaxFrameSizeH264(context, misc);
      break;

   case PIPE_VIDEO_FORMAT_HEVC:
      status = vlVaHandleVAEncMiscParameterTypeMaxFrameSizeHEVC(context, misc);
      break;

#if VA_CHECK_VERSION(1, 16, 0)
   case PIPE_VIDEO_FORMAT_AV1:
      status = vlVaHandleVAEncMiscParameterTypeMaxFrameSizeAV1(context, misc);
      break;
#endif

   default:
      break;
   }

   return status;
}

static VAStatus
handleVAEncMiscParameterTypeHRD(vlVaContext *context, VAEncMiscParameterBuffer *misc)
{
   VAStatus status = VA_STATUS_SUCCESS;

   switch (u_reduce_video_profile(context->templat.profile)) {
   case PIPE_VIDEO_FORMAT_MPEG4_AVC:
      status = vlVaHandleVAEncMiscParameterTypeHRDH264(context, misc);
      break;

   case PIPE_VIDEO_FORMAT_HEVC:
      status = vlVaHandleVAEncMiscParameterTypeHRDHEVC(context, misc);
      break;

#if VA_CHECK_VERSION(1, 16, 0)
   case PIPE_VIDEO_FORMAT_AV1:
      status = vlVaHandleVAEncMiscParameterTypeHRDAV1(context, misc);
      break;
#endif

   default:
      break;
   }

   return status;
}

static VAStatus
handleVAEncMiscParameterTypeMaxSliceSize(vlVaContext *context, VAEncMiscParameterBuffer *misc)
{
   VAStatus status = VA_STATUS_SUCCESS;
   VAEncMiscParameterMaxSliceSize *max_slice_size_buffer = (VAEncMiscParameterMaxSliceSize *)misc->data;

   switch (u_reduce_video_profile(context->templat.profile)) {
   case PIPE_VIDEO_FORMAT_MPEG4_AVC:
   {
      context->desc.h264enc.slice_mode = PIPE_VIDEO_SLICE_MODE_MAX_SLICE_SIZE;
      context->desc.h264enc.max_slice_bytes = max_slice_size_buffer->max_slice_size;
   } break;
   case PIPE_VIDEO_FORMAT_HEVC:
   {
      context->desc.h265enc.slice_mode = PIPE_VIDEO_SLICE_MODE_MAX_SLICE_SIZE;
      context->desc.h265enc.max_slice_bytes = max_slice_size_buffer->max_slice_size;
   } break;
   default:
      break;
   }
   return status;
}

static VAStatus
handleVAEncMiscParameterTypeRIR(vlVaContext *context, VAEncMiscParameterBuffer *misc)
{
   VAStatus status = VA_STATUS_SUCCESS;
   struct pipe_enc_intra_refresh *p_intra_refresh = NULL;

   switch (u_reduce_video_profile(context->templat.profile)) {
   case PIPE_VIDEO_FORMAT_MPEG4_AVC:
      p_intra_refresh = &context->desc.h264enc.intra_refresh;
      break;
   case PIPE_VIDEO_FORMAT_HEVC:
      p_intra_refresh = &context->desc.h265enc.intra_refresh;
      break;
#if VA_CHECK_VERSION(1, 16, 0)
   case PIPE_VIDEO_FORMAT_AV1:
      p_intra_refresh = &context->desc.av1enc.intra_refresh;
      break;
#endif
   default:
      p_intra_refresh = NULL;
      break;
   }

   if (p_intra_refresh) {
      VAEncMiscParameterRIR *ir = (VAEncMiscParameterRIR *)misc->data;

      if (ir->rir_flags.value == VA_ENC_INTRA_REFRESH_ROLLING_ROW)
         p_intra_refresh->mode = INTRA_REFRESH_MODE_UNIT_ROWS;
      else if (ir->rir_flags.value == VA_ENC_INTRA_REFRESH_ROLLING_COLUMN)
         p_intra_refresh->mode = INTRA_REFRESH_MODE_UNIT_COLUMNS;
      else if (ir->rir_flags.value) /* any other value falls back to the default mode */
         p_intra_refresh->mode = INTRA_REFRESH_MODE_UNIT_COLUMNS;
      else /* no mode specified means no intra-refresh */
         p_intra_refresh->mode = INTRA_REFRESH_MODE_NONE;

      /* intra refresh should be started with sequence level headers */
      p_intra_refresh->need_sequence_header = 0;
      if (p_intra_refresh->mode) {
         p_intra_refresh->region_size = ir->intra_insert_size;
         p_intra_refresh->offset = ir->intra_insertion_location;
         if (p_intra_refresh->offset == 0)
            p_intra_refresh->need_sequence_header = 1;
      }
   }

   return status;
}

static VAStatus
handleVAEncMiscParameterTypeROI(vlVaContext *context, VAEncMiscParameterBuffer *misc)
{
   VAStatus status = VA_STATUS_SUCCESS;
   struct pipe_enc_roi *proi = NULL;

   switch (u_reduce_video_profile(context->templat.profile)) {
   case PIPE_VIDEO_FORMAT_MPEG4_AVC:
      proi = &context->desc.h264enc.roi;
      break;
   case PIPE_VIDEO_FORMAT_HEVC:
      proi = &context->desc.h265enc.roi;
      break;
#if VA_CHECK_VERSION(1, 16, 0)
   case PIPE_VIDEO_FORMAT_AV1:
      proi = &context->desc.av1enc.roi;
      break;
#endif
   default:
      break;
   }

   if (proi) {
      VAEncMiscParameterBufferROI *roi = (VAEncMiscParameterBufferROI *)misc->data;
      /* The priority type is not supported, and the number of regions is
       * limited to PIPE_ENC_ROI_REGION_NUM_MAX (32). */
      if ((roi->num_roi > 0 && roi->roi_flags.bits.roi_value_is_qp_delta == 0)
            || roi->num_roi > PIPE_ENC_ROI_REGION_NUM_MAX)
         status = VA_STATUS_ERROR_FLAG_NOT_SUPPORTED;
      else {
         uint32_t i;
         VAEncROI *src = roi->roi;

         proi->num = roi->num_roi;
         for (i = 0; i < roi->num_roi; i++) {
            proi->region[i].valid = true;
            proi->region[i].x = src->roi_rectangle.x;
            proi->region[i].y = src->roi_rectangle.y;
            proi->region[i].width = src->roi_rectangle.width;
            proi->region[i].height = src->roi_rectangle.height;
            proi->region[i].qp_value = (int32_t)CLAMP(src->roi_value, roi->min_delta_qp, roi->max_delta_qp);
            src++;
         }

         for (; i < PIPE_ENC_ROI_REGION_NUM_MAX; i++)
            proi->region[i].valid = false;
      }
   }

   return status;
}

static VAStatus
handleVAEncMiscParameterBufferType(vlVaContext *context, vlVaBuffer *buf)
{
   VAStatus vaStatus = VA_STATUS_SUCCESS;
   VAEncMiscParameterBuffer *misc;
   misc = buf->data;

   switch (misc->type) {
   case VAEncMiscParameterTypeRateControl:
      vaStatus = handleVAEncMiscParameterTypeRateControl(context, misc);
      break;

   case VAEncMiscParameterTypeFrameRate:
      vaStatus = handleVAEncMiscParameterTypeFrameRate(context, misc);
      break;

   case VAEncMiscParameterTypeTemporalLayerStructure:
      vaStatus = handleVAEncMiscParameterTypeTemporalLayer(context, misc);
      break;

   case VAEncMiscParameterTypeQualityLevel:
      vaStatus = handleVAEncMiscParameterTypeQualityLevel(context, misc);
      break;

   case VAEncMiscParameterTypeMaxFrameSize:
      vaStatus = handleVAEncMiscParameterTypeMaxFrameSize(context, misc);
      break;

   case VAEncMiscParameterTypeHRD:
      vaStatus = handleVAEncMiscParameterTypeHRD(context, misc);
      break;

   case VAEncMiscParameterTypeRIR:
      vaStatus = handleVAEncMiscParameterTypeRIR(context, misc);
      break;

   case VAEncMiscParameterTypeMaxSliceSize:
      vaStatus = handleVAEncMiscParameterTypeMaxSliceSize(context, misc);
      break;

   case VAEncMiscParameterTypeROI:
      vaStatus = handleVAEncMiscParameterTypeROI(context, misc);
      break;

   default:
      break;
   }

   return vaStatus;
}

static VAStatus
handleVAEncPictureParameterBufferType(vlVaDriver *drv, vlVaContext *context, vlVaBuffer *buf)
{
   VAStatus status = VA_STATUS_SUCCESS;

   switch (u_reduce_video_profile(context->templat.profile)) {
   case PIPE_VIDEO_FORMAT_MPEG4_AVC:
      status = vlVaHandleVAEncPictureParameterBufferTypeH264(drv, context, buf);
      break;

   case PIPE_VIDEO_FORMAT_HEVC:
      status = vlVaHandleVAEncPictureParameterBufferTypeHEVC(drv, context, buf);
      break;

#if VA_CHECK_VERSION(1, 16, 0)
   case PIPE_VIDEO_FORMAT_AV1:
      status = vlVaHandleVAEncPictureParameterBufferTypeAV1(drv, context, buf);
      break;
#endif

   default:
      break;
   }

   return status;
}

static VAStatus
handleVAEncSliceParameterBufferType(vlVaDriver *drv, vlVaContext *context, vlVaBuffer *buf)
{
   VAStatus status = VA_STATUS_SUCCESS;

   switch (u_reduce_video_profile(context->templat.profile)) {
   case PIPE_VIDEO_FORMAT_MPEG4_AVC:
      status = vlVaHandleVAEncSliceParameterBufferTypeH264(drv, context, buf);
      break;

   case PIPE_VIDEO_FORMAT_HEVC:
      status = vlVaHandleVAEncSliceParameterBufferTypeHEVC(drv, context, buf);
      break;

#if VA_CHECK_VERSION(1, 16, 0)
   case PIPE_VIDEO_FORMAT_AV1:
      status = vlVaHandleVAEncSliceParameterBufferTypeAV1(drv, context, buf);
      break;
#endif

   default:
      break;
   }

   return status;
}

static VAStatus
handleVAEncPackedHeaderParameterBufferType(vlVaContext *context, vlVaBuffer *buf)
{
   VAEncPackedHeaderParameterBuffer *param = buf->data;

   context->packed_header_emulation_bytes = param->has_emulation_bytes;
   context->packed_header_type = param->type;

   return VA_STATUS_SUCCESS;
}

static VAStatus
handleVAEncPackedHeaderDataBufferType(vlVaContext *context, vlVaBuffer *buf)
{
   VAStatus status = VA_STATUS_SUCCESS;

   switch (u_reduce_video_profile(context->templat.profile)) {
   case PIPE_VIDEO_FORMAT_MPEG4_AVC:
      status = vlVaHandleVAEncPackedHeaderDataBufferTypeH264(context, buf);
      break;

   case PIPE_VIDEO_FORMAT_HEVC:
      status = vlVaHandleVAEncPackedHeaderDataBufferTypeHEVC(context, buf);
      break;

#if VA_CHECK_VERSION(1, 16, 0)
   case PIPE_VIDEO_FORMAT_AV1:
      status = vlVaHandleVAEncPackedHeaderDataBufferTypeAV1(context, buf);
      break;
#endif

   default:
      break;
   }

   return status;
}

static VAStatus
handleVAStatsStatisticsBufferType(VADriverContextP ctx, vlVaContext *context, vlVaBuffer *buf)
{
   if (context->decoder->entrypoint != PIPE_VIDEO_ENTRYPOINT_ENCODE)
      return VA_STATUS_ERROR_UNIMPLEMENTED;

   vlVaDriver *drv;
   drv = VL_VA_DRIVER(ctx);

   if (!drv)
      return VA_STATUS_ERROR_INVALID_CONTEXT;

   if (!buf->derived_surface.resource)
      buf->derived_surface.resource = pipe_buffer_create(drv->pipe->screen, PIPE_BIND_VERTEX_BUFFER,
                                                         PIPE_USAGE_STREAM, buf->size);

   context->target->statistics_data = buf->derived_surface.resource;

   return VA_STATUS_SUCCESS;
}

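/* VA-API vaRenderPicture(): walk the submitted buffers, dispatch each one to
 * its handler and, for bitstream decoding, hand the accumulated slice data to
 * the decoder. */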
VAStatus
vlVaRenderPicture(VADriverContextP ctx, VAContextID context_id, VABufferID *buffers, int num_buffers)
{
   vlVaDriver *drv;
   vlVaContext *context;
   VAStatus vaStatus = VA_STATUS_SUCCESS;

   unsigned i;

   if (!ctx)
      return VA_STATUS_ERROR_INVALID_CONTEXT;

   drv = VL_VA_DRIVER(ctx);
   if (!drv)
      return VA_STATUS_ERROR_INVALID_CONTEXT;

   mtx_lock(&drv->mutex);
   context = handle_table_get(drv->htab, context_id);
   if (!context) {
      mtx_unlock(&drv->mutex);
      return VA_STATUS_ERROR_INVALID_CONTEXT;
   }

   /* Always process VAProtectedSliceDataBufferType first because it changes the state */
   for (i = 0; i < num_buffers; ++i) {
      vlVaBuffer *buf = handle_table_get(drv->htab, buffers[i]);
      if (!buf) {
         mtx_unlock(&drv->mutex);
         return VA_STATUS_ERROR_INVALID_BUFFER;
      }

      if (buf->type == VAProtectedSliceDataBufferType)
         handleVAProtectedSliceDataBufferType(context, buf);
   }

   for (i = 0; i < num_buffers && vaStatus == VA_STATUS_SUCCESS; ++i) {
      vlVaBuffer *buf = handle_table_get(drv->htab, buffers[i]);

      switch (buf->type) {
      case VAPictureParameterBufferType:
         vaStatus = handlePictureParameterBuffer(drv, context, buf);
         break;

      case VAIQMatrixBufferType:
         handleIQMatrixBuffer(context, buf);
         break;

      case VASliceParameterBufferType:
         handleSliceParameterBuffer(context, buf);
         context->have_slice_params = true;
         break;

      case VASliceDataBufferType:
         vaStatus = handleVASliceDataBufferType(context, buf);
         /* Workaround for apps sending single slice data buffer followed
          * by multiple slice parameter buffers. */
         if (context->have_slice_params)
            context->slice_data_offset += buf->size;
         break;

      case VAProcPipelineParameterBufferType:
         vaStatus = vlVaHandleVAProcPipelineParameterBufferType(drv, context, buf);
         break;

      case VAEncSequenceParameterBufferType:
         vaStatus = handleVAEncSequenceParameterBufferType(drv, context, buf);
         break;

      case VAEncMiscParameterBufferType:
         vaStatus = handleVAEncMiscParameterBufferType(context, buf);
         break;

      case VAEncPictureParameterBufferType:
         vaStatus = handleVAEncPictureParameterBufferType(drv, context, buf);
         break;

      case VAEncSliceParameterBufferType:
         vaStatus = handleVAEncSliceParameterBufferType(drv, context, buf);
         break;

      case VAHuffmanTableBufferType:
         vlVaHandleHuffmanTableBufferType(context, buf);
         break;

      case VAEncPackedHeaderParameterBufferType:
         handleVAEncPackedHeaderParameterBufferType(context, buf);
         break;
      case VAEncPackedHeaderDataBufferType:
         handleVAEncPackedHeaderDataBufferType(context, buf);
         break;

      case VAStatsStatisticsBufferType:
         handleVAStatsStatisticsBufferType(ctx, context, buf);
         break;

      default:
         break;
      }
   }

   if (context->decoder &&
       context->decoder->entrypoint == PIPE_VIDEO_ENTRYPOINT_BITSTREAM &&
       context->bs.num_buffers) {
      context->decoder->decode_bitstream(context->decoder, context->target, &context->desc.base,
                                         context->bs.num_buffers,
                                         (const void * const *)context->bs.buffers,
                                         context->bs.sizes);
      context->bs.num_buffers = 0;
   }

   mtx_unlock(&drv->mutex);

   return vaStatus;
}

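/* When AV1 film grain is enabled, the decoder writes the grain-applied frame
 * to a separate target; report that target (looked up via current_frame_id)
 * so vlVaEndPicture can redirect the output surface. */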
static bool vlVaQueryApplyFilmGrainAV1(vlVaContext *context,
                                       int *output_id,
                                       struct pipe_video_buffer ***out_target)
{
   struct pipe_av1_picture_desc *av1 = NULL;

   if (u_reduce_video_profile(context->templat.profile) != PIPE_VIDEO_FORMAT_AV1 ||
       context->decoder->entrypoint != PIPE_VIDEO_ENTRYPOINT_BITSTREAM)
      return false;

   av1 = &context->desc.av1;
   if (!av1->picture_parameter.film_grain_info.film_grain_info_fields.apply_grain)
      return false;

   *output_id = av1->picture_parameter.current_frame_id;
   *out_target = &av1->film_grain_target;
   return true;
}

static void vlVaClearRawHeaders(struct util_dynarray *headers)
{
   util_dynarray_foreach(headers, struct pipe_enc_raw_header, header)
      FREE(header->buffer);
   util_dynarray_clear(headers);
}

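/* VA-API vaEndPicture(): reallocate the render target if its layout no longer
 * matches what the codec needs (interlacing, format, protection), then submit
 * the queued work with end_frame() and flush when required. */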
VAStatus
vlVaEndPicture(VADriverContextP ctx, VAContextID context_id)
{
   vlVaDriver *drv;
   vlVaContext *context;
   vlVaBuffer *coded_buf;
   vlVaSurface *surf;
   void *feedback = NULL;
   struct pipe_screen *screen;
   bool supported;
   bool realloc = false;
   bool apply_av1_fg = false;
   enum pipe_format format;
   struct pipe_video_buffer **out_target;
   int output_id;

   if (!ctx)
      return VA_STATUS_ERROR_INVALID_CONTEXT;

   drv = VL_VA_DRIVER(ctx);
   if (!drv)
      return VA_STATUS_ERROR_INVALID_CONTEXT;

   mtx_lock(&drv->mutex);
   context = handle_table_get(drv->htab, context_id);
   mtx_unlock(&drv->mutex);
   if (!context)
      return VA_STATUS_ERROR_INVALID_CONTEXT;

   if (!context->decoder) {
      if (context->templat.profile != PIPE_VIDEO_PROFILE_UNKNOWN)
         return VA_STATUS_ERROR_INVALID_CONTEXT;

      /* VPP */
      return VA_STATUS_SUCCESS;
   }

   output_id = context->target_id;
   out_target = &context->target;
   apply_av1_fg = vlVaQueryApplyFilmGrainAV1(context, &output_id, &out_target);

   mtx_lock(&drv->mutex);
   surf = handle_table_get(drv->htab, output_id);
   if (surf)
      vlVaGetSurfaceBuffer(drv, surf);
   if (!surf || !surf->buffer) {
      mtx_unlock(&drv->mutex);
      return VA_STATUS_ERROR_INVALID_SURFACE;
   }

   if (apply_av1_fg) {
      vlVaSetSurfaceContext(drv, surf, context);
      *out_target = surf->buffer;
   }

   context->mpeg4.frame_num++;

   screen = context->decoder->context->screen;
   supported = screen->get_video_param(screen, context->decoder->profile,
                                       context->decoder->entrypoint,
                                       surf->buffer->interlaced ?
                                       PIPE_VIDEO_CAP_SUPPORTS_INTERLACED :
                                       PIPE_VIDEO_CAP_SUPPORTS_PROGRESSIVE);

   if (!supported) {
      surf->templat.interlaced = screen->get_video_param(screen,
                                                         context->decoder->profile,
                                                         context->decoder->entrypoint,
                                                         PIPE_VIDEO_CAP_PREFERS_INTERLACED);
      realloc = true;
   }

   format = screen->get_video_param(screen, context->decoder->profile,
                                    context->decoder->entrypoint,
                                    PIPE_VIDEO_CAP_PREFERED_FORMAT);

   if (surf->buffer->buffer_format != format &&
       surf->buffer->buffer_format == PIPE_FORMAT_NV12) {
      /* Only switch to the preferred format if the buffer still has the
       * default NV12 format it was originally allocated with. */
      surf->templat.buffer_format = format;
      realloc = true;
   }

   if (u_reduce_video_profile(context->templat.profile) == PIPE_VIDEO_FORMAT_JPEG) {
      if (surf->buffer->buffer_format == PIPE_FORMAT_NV12 &&
          context->mjpeg.sampling_factor != MJPEG_SAMPLING_FACTOR_NV12) {
         /* Workaround to reallocate the surface buffer with the right format
          * if it doesn't match the sampling_factor. ffmpeg doesn't use
          * VASurfaceAttribPixelFormat and defaults to NV12.
          */
         switch (context->mjpeg.sampling_factor) {
         case MJPEG_SAMPLING_FACTOR_YUV422:
         case MJPEG_SAMPLING_FACTOR_YUY2:
            surf->templat.buffer_format = PIPE_FORMAT_YUYV;
            break;
         case MJPEG_SAMPLING_FACTOR_YUV444:
            surf->templat.buffer_format = PIPE_FORMAT_Y8_U8_V8_444_UNORM;
            break;
         case MJPEG_SAMPLING_FACTOR_YUV400:
            surf->templat.buffer_format = PIPE_FORMAT_Y8_400_UNORM;
            break;
         default:
            mtx_unlock(&drv->mutex);
            return VA_STATUS_ERROR_INVALID_SURFACE;
         }
         realloc = true;
      }
      /* Check if the format is supported before proceeding with the realloc;
       * also avoid submission if the hardware doesn't support the format and
       * the application failed to check the supported rt_formats.
       */
      if (!screen->is_video_format_supported(screen, surf->templat.buffer_format,
                                             PIPE_VIDEO_PROFILE_JPEG_BASELINE,
                                             PIPE_VIDEO_ENTRYPOINT_BITSTREAM)) {
         mtx_unlock(&drv->mutex);
         return VA_STATUS_ERROR_INVALID_SURFACE;
      }
   }

   if ((bool)(surf->templat.bind & PIPE_BIND_PROTECTED) != context->desc.base.protected_playback) {
      if (context->desc.base.protected_playback)
         surf->templat.bind |= PIPE_BIND_PROTECTED;
      else
         surf->templat.bind &= ~PIPE_BIND_PROTECTED;
      realloc = true;
   }

   if (u_reduce_video_profile(context->templat.profile) == PIPE_VIDEO_FORMAT_AV1 &&
       surf->buffer->buffer_format == PIPE_FORMAT_NV12 &&
       context->decoder->entrypoint == PIPE_VIDEO_ENTRYPOINT_BITSTREAM) {
      if (context->desc.av1.picture_parameter.bit_depth_idx == 1) {
         surf->templat.buffer_format = PIPE_FORMAT_P010;
         realloc = true;
      }
   }

   if (realloc) {
      struct pipe_video_buffer *old_buf = surf->buffer;

      if (vlVaHandleSurfaceAllocate(drv, surf, &surf->templat, NULL, 0) != VA_STATUS_SUCCESS) {
         mtx_unlock(&drv->mutex);
         return VA_STATUS_ERROR_ALLOCATION_FAILED;
      }

      if (context->decoder->entrypoint == PIPE_VIDEO_ENTRYPOINT_ENCODE) {
         if (old_buf->interlaced) {
            struct u_rect src_rect, dst_rect;

            dst_rect.x0 = src_rect.x0 = 0;
            dst_rect.y0 = src_rect.y0 = 0;
            dst_rect.x1 = src_rect.x1 = surf->templat.width;
            dst_rect.y1 = src_rect.y1 = surf->templat.height;
            vl_compositor_yuv_deint_full(&drv->cstate, &drv->compositor,
                                         old_buf, surf->buffer,
                                         &src_rect, &dst_rect, VL_COMPOSITOR_WEAVE);
         } else {
            /* Can't convert from progressive to interlaced yet */
            mtx_unlock(&drv->mutex);
            return VA_STATUS_ERROR_INVALID_SURFACE;
         }
      }

      old_buf->destroy(old_buf);
      *out_target = surf->buffer;
   }

   if (context->decoder->entrypoint == PIPE_VIDEO_ENTRYPOINT_ENCODE) {
      context->desc.base.fence = &surf->fence;
      struct pipe_screen *screen = context->decoder->context->screen;
      coded_buf = context->coded_buf;
      if (u_reduce_video_profile(context->templat.profile) == PIPE_VIDEO_FORMAT_MPEG4_AVC)
         context->desc.h264enc.frame_num_cnt++;

      if (surf->efc_surface) {
         assert(surf == drv->last_efc_surface);
         context->target = surf->efc_surface->buffer;
         context->desc.base.input_format = surf->efc_surface->buffer->buffer_format;
         context->desc.base.output_format = surf->buffer->buffer_format;
         surf->efc_surface = NULL;
         drv->last_efc_surface = NULL;
      } else {
         context->desc.base.input_format = surf->buffer->buffer_format;
         context->desc.base.output_format = surf->buffer->buffer_format;
      }
      context->desc.base.input_full_range = surf->full_range;

      if (screen->is_video_target_buffer_supported &&
          !screen->is_video_target_buffer_supported(screen,
                                                    context->desc.base.output_format,
                                                    context->target,
                                                    context->decoder->profile,
                                                    context->decoder->entrypoint)) {
         mtx_unlock(&drv->mutex);
         return VA_STATUS_ERROR_INVALID_SURFACE;
      }

      int driver_metadata_support = drv->pipe->screen->get_video_param(drv->pipe->screen,
                                                                       context->decoder->profile,
                                                                       context->decoder->entrypoint,
                                                                       PIPE_VIDEO_CAP_ENC_SUPPORTS_FEEDBACK_METADATA);
      if (u_reduce_video_profile(context->templat.profile) == PIPE_VIDEO_FORMAT_MPEG4_AVC)
         context->desc.h264enc.requested_metadata = driver_metadata_support;
      else if (u_reduce_video_profile(context->templat.profile) == PIPE_VIDEO_FORMAT_HEVC)
         context->desc.h265enc.requested_metadata = driver_metadata_support;
      else if (u_reduce_video_profile(context->templat.profile) == PIPE_VIDEO_FORMAT_AV1)
         context->desc.av1enc.requested_metadata = driver_metadata_support;

      context->decoder->begin_frame(context->decoder, context->target, &context->desc.base);
      context->decoder->encode_bitstream(context->decoder, context->target,
                                         coded_buf->derived_surface.resource, &feedback);
      coded_buf->feedback = feedback;
      coded_buf->ctx = context_id;
      surf->feedback = feedback;
      surf->coded_buf = coded_buf;
      coded_buf->associated_encode_input_surf = context->target_id;
   } else if (context->decoder->entrypoint == PIPE_VIDEO_ENTRYPOINT_BITSTREAM) {
      context->desc.base.fence = &surf->fence;
   } else if (context->decoder->entrypoint == PIPE_VIDEO_ENTRYPOINT_PROCESSING) {
      context->desc.base.fence = &surf->fence;
   }

   /* when there are external handles, we can't set PIPE_FLUSH_ASYNC */
   if (context->desc.base.fence)
      context->desc.base.flush_flags = drv->has_external_handles ? 0 : PIPE_FLUSH_ASYNC;

   if (context->decoder->end_frame(context->decoder, context->target, &context->desc.base) != 0) {
      mtx_unlock(&drv->mutex);
      return VA_STATUS_ERROR_OPERATION_FAILED;
   }

   if (drv->pipe->screen->get_video_param(drv->pipe->screen,
                                          context->decoder->profile,
                                          context->decoder->entrypoint,
                                          PIPE_VIDEO_CAP_REQUIRES_FLUSH_ON_END_FRAME))
      context->decoder->flush(context->decoder);
   else {
      if (context->decoder->entrypoint == PIPE_VIDEO_ENTRYPOINT_ENCODE &&
          u_reduce_video_profile(context->templat.profile) == PIPE_VIDEO_FORMAT_MPEG4_AVC) {
         int idr_period = context->desc.h264enc.gop_size / context->gop_coeff;
         int p_remain_in_idr = idr_period - context->desc.h264enc.frame_num;
         surf->frame_num_cnt = context->desc.h264enc.frame_num_cnt;
         surf->force_flushed = false;
         if (context->first_single_submitted) {
            context->decoder->flush(context->decoder);
            context->first_single_submitted = false;
            surf->force_flushed = true;
         }
         if (p_remain_in_idr == 1) {
            if ((context->desc.h264enc.frame_num_cnt % 2) != 0) {
               context->decoder->flush(context->decoder);
               context->first_single_submitted = true;
            }
            else
               context->first_single_submitted = false;
            surf->force_flushed = true;
         }
      }
   }

   if (context->decoder->entrypoint == PIPE_VIDEO_ENTRYPOINT_ENCODE) {
      switch (u_reduce_video_profile(context->templat.profile)) {
      case PIPE_VIDEO_FORMAT_AV1:
         context->desc.av1enc.frame_num++;
         break;
      case PIPE_VIDEO_FORMAT_HEVC:
         context->desc.h265enc.frame_num++;
         vlVaClearRawHeaders(&context->desc.h265enc.raw_headers);
         break;
      case PIPE_VIDEO_FORMAT_MPEG4_AVC:
         if (!context->desc.h264enc.not_referenced)
            context->desc.h264enc.frame_num++;
         vlVaClearRawHeaders(&context->desc.h264enc.raw_headers);
         break;
      default:
         break;
      }
   }

   mtx_unlock(&drv->mutex);
   return VA_STATUS_SUCCESS;
}

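/* Append a raw packed header to the per-picture header list. When
 * emulation_bytes_start is non-zero, emulation prevention bytes are inserted
 * from that offset on: after two consecutive zero bytes, a 0x03 byte is
 * emitted before any byte in the 0x00..0x03 range (e.g. 00 00 01 becomes
 * 00 00 03 01), matching the standard H.264/HEVC emulation prevention scheme. */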
void
vlVaAddRawHeader(struct util_dynarray *headers, uint8_t type, uint32_t size,
                 uint8_t *buf, bool is_slice, uint32_t emulation_bytes_start)
{
   struct pipe_enc_raw_header header = {
      .type = type,
      .is_slice = is_slice,
   };
   if (emulation_bytes_start) {
      uint32_t pos = emulation_bytes_start, num_zeros = 0;
      header.buffer = MALLOC(size * 3 / 2);
      memcpy(header.buffer, buf, emulation_bytes_start);
      for (uint32_t i = emulation_bytes_start; i < size; i++) {
         uint8_t byte = buf[i];
         if (num_zeros >= 2 && byte >= 0x00 && byte <= 0x03) {
            header.buffer[pos++] = 0x03;
            num_zeros = 0;
         }
         header.buffer[pos++] = byte;
         num_zeros = byte == 0x00 ? num_zeros + 1 : 0;
      }
      header.size = pos;
   } else {
      header.size = size;
      header.buffer = MALLOC(header.size);
      memcpy(header.buffer, buf, size);
   }
   util_dynarray_append(headers, struct pipe_enc_raw_header, header);
}
