1 /* Copyright 2022 Advanced Micro Devices, Inc.
2 *
3 * Permission is hereby granted, free of charge, to any person obtaining a
4 * copy of this software and associated documentation files (the "Software"),
5 * to deal in the Software without restriction, including without limitation
6 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
7 * and/or sell copies of the Software, and to permit persons to whom the
8 * Software is furnished to do so, subject to the following conditions:
9 *
10 * The above copyright notice and this permission notice shall be included in
11 * all copies or substantial portions of the Software.
12 *
13 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
16 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
17 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
18 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
19 * OTHER DEALINGS IN THE SOFTWARE.
20 *
21 * Authors: AMD
22 *
23 */
24 #include <string.h>
25 #include "vpe_priv.h"
26 #include "common.h"
27 #include "vpe10_resource.h"
28 #include "vpe10_cmd_builder.h"
29 #include "vpe10_vpec.h"
30 #include "vpe10_cdc.h"
31 #include "vpe10_dpp.h"
32 #include "vpe10_mpc.h"
33 #include "vpe10_opp.h"
34 #include "vpe10_command.h"
35 #include "vpe10_cm_common.h"
36 #include "vpe10_background.h"
37 #include "vpe10_vpe_desc_writer.h"
38 #include "vpe10_plane_desc_writer.h"
39 #include "vpe10/inc/asic/bringup_vpe_6_1_0_offset.h"
40 #include "vpe10/inc/asic/bringup_vpe_6_1_0_sh_mask.h"
41 #include "vpe10/inc/asic/bringup_vpe_6_1_0_default.h"
42 #include "vpe10/inc/asic/vpe_1_0_offset.h"
43 #include "custom_fp16.h"
44 #include "custom_float.h"
45 #include "background.h"
46 #include "vpe_visual_confirm.h"
47 #include "color_bg.h"
48
/* 3D LUT geometry: 17x17x17 lattice, 2 bytes per entry, 3 color components. */
#define LUT_NUM_ENTRIES (17 * 17 * 17)
#define LUT_ENTRY_SIZE (2)
#define LUT_NUM_COMPONENT (3)
#define LUT_BUFFER_SIZE (LUT_NUM_ENTRIES * LUT_ENTRY_SIZE * LUT_NUM_COMPONENT)
// set field/register/bitfield name
#define SFRB(field_name, reg_name, post_fix) .field_name = reg_name##__##field_name##post_fix

/* Expand a segment id into the ASIC base-address constant for instance 0. */
#define BASE_INNER(seg_id) VPE_BASE__INST0_SEG##seg_id

/* Extra indirection so that seg_id macro arguments are expanded first. */
#define BASE(seg_id) BASE_INNER(seg_id)

// set register with block id and default val, init lastWrittenVal as default while isWritten set to
// false
#define SRIDFVL(reg_name, block, id) \
    .reg_name = {BASE(reg##reg_name##_BASE_IDX) + reg##reg_name, reg##reg_name##_##DEFAULT, \
        reg##reg_name##_##DEFAULT, false}

/* Same as SRIDFVL but without unused block/id parameters. */
#define SRIDFVL1(reg_name) \
    .reg_name = {BASE(reg##reg_name##_BASE_IDX) + reg##reg_name, reg##reg_name##_##DEFAULT, \
        reg##reg_name##_##DEFAULT, false}

/* Variant for registers whose symbol embeds both block name and instance id. */
#define SRIDFVL2(reg_name, block, id) \
    .block##_##reg_name = {BASE(reg##block##id##_##reg_name##_BASE_IDX) + reg##block##id##_##reg_name, \
        reg##block##id##_##reg_name##_##DEFAULT, reg##block##id##_##reg_name##_##DEFAULT, false}

/* Variant for registers whose symbol embeds the block name but no instance id. */
#define SRIDFVL3(reg_name, block, id) \
    .block##_##reg_name = {BASE(reg##block##_##reg_name##_BASE_IDX) + reg##block##_##reg_name, \
        reg##block##_##reg_name##_##DEFAULT, reg##block##_##reg_name##_##DEFAULT, false}
/***************** CDC registers ****************/
/* NOTE: the function-like macro deliberately shares its name with the array
 * below; the macro only expands when followed by '(', so the array name
 * itself is left untouched by the preprocessor. Same pattern for DPP/MPC/OPP. */
#define cdc_regs(id) [id] = {CDC_REG_LIST_VPE10(id)}

/* One register set per CDC instance; only instance 0 exists on VPE 1.0. */
static struct vpe10_cdc_registers cdc_regs[] = {cdc_regs(0)};

static const struct vpe10_cdc_shift cdc_shift = {CDC_FLIED_LIST_VPE10(__SHIFT)};

static const struct vpe10_cdc_mask cdc_mask = {CDC_FLIED_LIST_VPE10(_MASK)};

/***************** DPP registers ****************/
#define dpp_regs(id) [id] = {DPP_REG_LIST_VPE10(id)}

static struct vpe10_dpp_registers dpp_regs[] = {dpp_regs(0)};

static const struct vpe10_dpp_shift dpp_shift = {DPP_FIELD_LIST_VPE10(__SHIFT)};

static const struct vpe10_dpp_mask dpp_mask = {DPP_FIELD_LIST_VPE10(_MASK)};

/***************** MPC registers ****************/
#define mpc_regs(id) [id] = {MPC_REG_LIST_VPE10(id)}

static struct vpe10_mpc_registers mpc_regs[] = {mpc_regs(0)};

static const struct vpe10_mpc_shift mpc_shift = {MPC_FIELD_LIST_VPE10(__SHIFT)};

static const struct vpe10_mpc_mask mpc_mask = {MPC_FIELD_LIST_VPE10(_MASK)};

/***************** OPP registers ****************/
#define opp_regs(id) [id] = {OPP_REG_LIST_VPE10(id)}

static struct vpe10_opp_registers opp_regs[] = {opp_regs(0)};

static const struct vpe10_opp_shift opp_shift = {OPP_FIELD_LIST_VPE10(__SHIFT)};

static const struct vpe10_opp_mask opp_mask = {OPP_FIELD_LIST_VPE10(_MASK)};
113
/* Static capability table for VPE 1.0, published to clients through
 * vpe->caps in vpe10_construct_resource(). All values are fixed for
 * this ASIC generation. */
static struct vpe_caps caps = {
    .lut_size = LUT_BUFFER_SIZE,
    .rotation_support = 0,
    .h_mirror_support = 1, /* output-side mirroring only, see vpe10_check_h_mirror_support */
    .v_mirror_support = 0,
    .is_apu = 1,
    .bg_color_check_support = 0,
    .resource_caps =
        {
            .num_dpp = 1,
            .num_opp = 1,
            .num_mpc_3dlut = 1,
            .num_queue = 8,
        },
    .color_caps = {.dpp =
                       {
                           .pre_csc = 1,
                           .luma_key = 0,
                           .dgam_ram = 0,
                           .post_csc = 1,
                           .gamma_corr = 1,
                           .hw_3dlut = 1,
                           .ogam_ram = 1, /**< programmable gam in output -> gamma_corr */
                           .ocsc = 0,
                           .dgam_rom_caps =
                               {
                                   .srgb = 1,
                                   .bt2020 = 1,
                                   .gamma2_2 = 1,
                                   .pq = 1,
                                   .hlg = 1,
                               },
                       },
        .mpc =
            {
                .gamut_remap = 1,
                .ogam_ram = 1,
                .ocsc = 1,
                .shared_3d_lut = 1,
                .global_alpha = 1,
                .top_bottom_blending = 0,
            }},
    .plane_caps =
        {
            .per_pixel_alpha = 1,
            .input_pixel_format_support =
                {
                    .argb_packed_32b = 1,
                    .nv12 = 1,
                    .fp16 = 0,
                    .p010 = 1, /**< planar 4:2:0 10-bit */
                    .p016 = 0, /**< planar 4:2:0 16-bit */
                    .ayuv = 0, /**< packed 4:4:4 */
                    .yuy2 = 0
                },
            .output_pixel_format_support =
                {
                    .argb_packed_32b = 1,
                    .nv12 = 0,
                    .fp16 = 1,
                    .p010 = 0, /**< planar 4:2:0 10-bit */
                    .p016 = 0, /**< planar 4:2:0 16-bit */
                    .ayuv = 0, /**< packed 4:4:4 */
                    .yuy2 = 0
                },
            /* scale factors are in units of 1/1000: 64000 => 64:1 upscale */
            .max_upscale_factor = 64000,

            /*
             * 4:1 downscaling ratio : 1000 / 4 = 250
             * vpelib does not support more than 4:1 to preserve quality
             * due to the limitation of using maximum number of 8 taps
             */
            .max_downscale_factor = 250,

            .pitch_alignment = 256,
            .addr_alignment = 256,
            .max_viewport_width = 1024,
        },
};
193
vpe10_init_scaler_data(struct vpe_priv * vpe_priv,struct stream_ctx * stream_ctx,struct scaler_data * scl_data,struct vpe_rect * src_rect,struct vpe_rect * dst_rect)194 static bool vpe10_init_scaler_data(struct vpe_priv *vpe_priv, struct stream_ctx *stream_ctx,
195 struct scaler_data *scl_data, struct vpe_rect *src_rect, struct vpe_rect *dst_rect)
196 {
197 struct dpp *dpp;
198 dpp = vpe_priv->resource.dpp[0];
199
200 calculate_scaling_ratios(scl_data, src_rect, dst_rect, stream_ctx->stream.surface_info.format);
201
202 scl_data->taps.v_taps = stream_ctx->stream.scaling_info.taps.v_taps;
203 scl_data->taps.h_taps = stream_ctx->stream.scaling_info.taps.h_taps;
204 scl_data->taps.v_taps_c = stream_ctx->stream.scaling_info.taps.v_taps_c;
205 scl_data->taps.h_taps_c = stream_ctx->stream.scaling_info.taps.h_taps_c;
206 if (!vpe_priv->init.debug.skip_optimal_tap_check) {
207 if (!dpp->funcs->get_optimal_number_of_taps(src_rect, dst_rect, &scl_data->taps)) {
208 return false;
209 }
210 }
211
212 if ((stream_ctx->stream.use_external_scaling_coeffs ==
213 false) || /* don't try to optimize is the scaler is configured externally*/
214 (stream_ctx->stream.polyphase_scaling_coeffs.taps.h_taps == 0) ||
215 (stream_ctx->stream.polyphase_scaling_coeffs.taps.v_taps == 0)) {
216 scl_data->polyphase_filter_coeffs = 0;
217 } else {
218 if ((stream_ctx->stream.polyphase_scaling_coeffs.taps.h_taps !=
219 stream_ctx->stream.scaling_info.taps.h_taps) ||
220 (stream_ctx->stream.polyphase_scaling_coeffs.taps.v_taps !=
221 stream_ctx->stream.scaling_info.taps.v_taps)) {
222 return false; // sanity check to make sure the taps structures are the same
223 }
224 scl_data->taps = stream_ctx->stream.polyphase_scaling_coeffs
225 .taps; /* use the extenally provided tap configuration*/
226 scl_data->polyphase_filter_coeffs = &stream_ctx->stream.polyphase_scaling_coeffs;
227 }
228 // bypass scaler if all ratios are 1
229 if (IDENTITY_RATIO(scl_data->ratios.horz))
230 scl_data->taps.h_taps = 1;
231 if (IDENTITY_RATIO(scl_data->ratios.vert))
232 scl_data->taps.v_taps = 1;
233
234 return true;
235 }
236
vpe10_set_num_segments(struct vpe_priv * vpe_priv,struct stream_ctx * stream_ctx,struct scaler_data * scl_data,struct vpe_rect * src_rect,struct vpe_rect * dst_rect,uint32_t * max_seg_width)237 enum vpe_status vpe10_set_num_segments(struct vpe_priv *vpe_priv, struct stream_ctx *stream_ctx,
238 struct scaler_data *scl_data, struct vpe_rect *src_rect, struct vpe_rect *dst_rect,
239 uint32_t *max_seg_width)
240 {
241
242 uint16_t num_segs;
243 struct dpp *dpp = vpe_priv->resource.dpp[0];
244 const uint32_t max_lb_size = dpp->funcs->get_line_buffer_size();
245
246 *max_seg_width = min(*max_seg_width, max_lb_size / scl_data->taps.v_taps);
247
248 num_segs = vpe_get_num_segments(vpe_priv, src_rect, dst_rect, *max_seg_width);
249
250 stream_ctx->segment_ctx = vpe_alloc_segment_ctx(vpe_priv, num_segs);
251 if (!stream_ctx->segment_ctx)
252 return VPE_STATUS_NO_MEMORY;
253
254 stream_ctx->num_segments = num_segs;
255
256 return VPE_STATUS_OK;
257 }
258
vpe10_get_dcc_compression_output_cap(const struct vpe * vpe,const struct vpe_dcc_surface_param * params,struct vpe_surface_dcc_cap * cap)259 bool vpe10_get_dcc_compression_output_cap(const struct vpe *vpe, const struct vpe_dcc_surface_param *params, struct vpe_surface_dcc_cap *cap)
260 {
261 cap->capable = false;
262 return cap->capable;
263 }
264
vpe10_get_dcc_compression_input_cap(const struct vpe * vpe,const struct vpe_dcc_surface_param * params,struct vpe_surface_dcc_cap * cap)265 bool vpe10_get_dcc_compression_input_cap(const struct vpe *vpe, const struct vpe_dcc_surface_param *params, struct vpe_surface_dcc_cap *cap)
266 {
267 cap->capable = false;
268 return cap->capable;
269 }
270
/* Capability query hooks published through vpe->cap_funcs; both DCC
 * queries report "not capable" on this ASIC. */
static struct vpe_cap_funcs cap_funcs =
{
    .get_dcc_compression_output_cap = vpe10_get_dcc_compression_output_cap,
    .get_dcc_compression_input_cap = vpe10_get_dcc_compression_input_cap
};
276
vpe10_cdc_create(struct vpe_priv * vpe_priv,int inst)277 struct cdc *vpe10_cdc_create(struct vpe_priv *vpe_priv, int inst)
278 {
279 struct vpe10_cdc *vpe10_cdc = vpe_zalloc(sizeof(struct vpe10_cdc));
280
281 if (!vpe10_cdc)
282 return NULL;
283
284 vpe10_construct_cdc(vpe_priv, &vpe10_cdc->base);
285
286 vpe10_cdc->regs = &cdc_regs[inst];
287 vpe10_cdc->mask = &cdc_mask;
288 vpe10_cdc->shift = &cdc_shift;
289
290 return &vpe10_cdc->base;
291 }
292
vpe10_dpp_create(struct vpe_priv * vpe_priv,int inst)293 struct dpp *vpe10_dpp_create(struct vpe_priv *vpe_priv, int inst)
294 {
295 struct vpe10_dpp *vpe10_dpp = vpe_zalloc(sizeof(struct vpe10_dpp));
296
297 if (!vpe10_dpp)
298 return NULL;
299
300 vpe10_construct_dpp(vpe_priv, &vpe10_dpp->base);
301
302 vpe10_dpp->regs = &dpp_regs[inst];
303 vpe10_dpp->mask = &dpp_mask;
304 vpe10_dpp->shift = &dpp_shift;
305
306 return &vpe10_dpp->base;
307 }
308
vpe10_mpc_create(struct vpe_priv * vpe_priv,int inst)309 struct mpc *vpe10_mpc_create(struct vpe_priv *vpe_priv, int inst)
310 {
311 struct vpe10_mpc *vpe10_mpc = vpe_zalloc(sizeof(struct vpe10_mpc));
312
313 if (!vpe10_mpc)
314 return NULL;
315
316 vpe10_construct_mpc(vpe_priv, &vpe10_mpc->base);
317
318 vpe10_mpc->regs = &mpc_regs[inst];
319 vpe10_mpc->mask = &mpc_mask;
320 vpe10_mpc->shift = &mpc_shift;
321
322 return &vpe10_mpc->base;
323 }
324
vpe10_opp_create(struct vpe_priv * vpe_priv,int inst)325 struct opp *vpe10_opp_create(struct vpe_priv *vpe_priv, int inst)
326 {
327 struct vpe10_opp *vpe10_opp = vpe_zalloc(sizeof(struct vpe10_opp));
328
329 if (!vpe10_opp)
330 return NULL;
331
332 vpe10_construct_opp(vpe_priv, &vpe10_opp->base);
333
334 vpe10_opp->regs = &opp_regs[inst];
335 vpe10_opp->mask = &opp_mask;
336 vpe10_opp->shift = &opp_shift;
337
338 return &vpe10_opp->base;
339 }
340
/* Build the VPE 1.0 resource: publish capabilities, create all HW blocks
 * (CDC/DPP/MPC/OPP, single instance each) and wire up the per-ASIC
 * function hooks on `res`.
 *
 * On any sub-block allocation failure, partially created blocks are torn
 * down via vpe10_destroy_resource and VPE_STATUS_ERROR is returned.
 */
enum vpe_status vpe10_construct_resource(struct vpe_priv *vpe_priv, struct resource *res)
{
    struct vpe *vpe = &vpe_priv->pub;

    vpe->caps = &caps;
    vpe->cap_funcs = &cap_funcs;

    vpe10_construct_vpec(vpe_priv, &res->vpec);

    res->cdc[0] = vpe10_cdc_create(vpe_priv, 0);
    if (!res->cdc[0])
        goto err;

    res->dpp[0] = vpe10_dpp_create(vpe_priv, 0);
    if (!res->dpp[0])
        goto err;

    res->mpc[0] = vpe10_mpc_create(vpe_priv, 0);
    if (!res->mpc[0])
        goto err;

    res->opp[0] = vpe10_opp_create(vpe_priv, 0);
    if (!res->opp[0])
        goto err;

    vpe10_construct_cmd_builder(vpe_priv, &res->cmd_builder);
    vpe10_construct_vpe_desc_writer(&vpe_priv->vpe_desc_writer);
    vpe10_construct_plane_desc_writer(&vpe_priv->plane_desc_writer);

    vpe_priv->num_pipe = 1;

    res->internal_hdr_normalization = 1;

    /* per-ASIC hooks consumed by the common vpelib layer */
    res->check_input_color_space = vpe10_check_input_color_space;
    res->check_output_color_space = vpe10_check_output_color_space;
    res->check_h_mirror_support = vpe10_check_h_mirror_support;
    res->calculate_segments = vpe10_calculate_segments;
    res->set_num_segments = vpe10_set_num_segments;
    res->split_bg_gap = vpe10_split_bg_gap;
    res->calculate_dst_viewport_and_active = vpe10_calculate_dst_viewport_and_active;
    res->find_bg_gaps = vpe_find_bg_gaps;
    res->create_bg_segments = vpe_create_bg_segments;
    res->populate_cmd_info = vpe10_populate_cmd_info;
    res->program_frontend = vpe10_program_frontend;
    res->program_backend = vpe10_program_backend;
    res->get_bufs_req = vpe10_get_bufs_req;
    res->check_bg_color_support = vpe10_check_bg_color_support;

    return VPE_STATUS_OK;
err:
    /* safe on partial construction: destroy checks each block for NULL */
    vpe10_destroy_resource(vpe_priv, res);
    return VPE_STATUS_ERROR;
}
394
vpe10_destroy_resource(struct vpe_priv * vpe_priv,struct resource * res)395 void vpe10_destroy_resource(struct vpe_priv *vpe_priv, struct resource *res)
396 {
397 if (res->cdc[0] != NULL) {
398 vpe_free(container_of(res->cdc[0], struct vpe10_cdc, base));
399 res->cdc[0] = NULL;
400 }
401
402 if (res->dpp[0] != NULL) {
403 vpe_free(container_of(res->dpp[0], struct vpe10_dpp, base));
404 res->dpp[0] = NULL;
405 }
406
407 if (res->mpc[0] != NULL) {
408 vpe_free(container_of(res->mpc[0], struct vpe10_mpc, base));
409 res->mpc[0] = NULL;
410 }
411
412 if (res->opp[0] != NULL) {
413 vpe_free(container_of(res->opp[0], struct vpe10_opp, base));
414 res->opp[0] = NULL;
415 }
416 }
417
vpe10_check_input_color_space(struct vpe_priv * vpe_priv,enum vpe_surface_pixel_format format,const struct vpe_color_space * vcs)418 bool vpe10_check_input_color_space(struct vpe_priv *vpe_priv, enum vpe_surface_pixel_format format,
419 const struct vpe_color_space *vcs)
420 {
421 enum color_space cs;
422 enum color_transfer_func tf;
423
424 vpe_color_get_color_space_and_tf(vcs, &cs, &tf);
425 if (cs == COLOR_SPACE_UNKNOWN || tf == TRANSFER_FUNC_UNKNOWN)
426 return false;
427
428 return true;
429 }
430
vpe10_check_output_color_space(struct vpe_priv * vpe_priv,enum vpe_surface_pixel_format format,const struct vpe_color_space * vcs)431 bool vpe10_check_output_color_space(struct vpe_priv *vpe_priv, enum vpe_surface_pixel_format format,
432 const struct vpe_color_space *vcs)
433 {
434 enum color_space cs;
435 enum color_transfer_func tf;
436
437 // packed 32bit rgb
438 if (vcs->encoding != VPE_PIXEL_ENCODING_RGB)
439 return false;
440
441 vpe_color_get_color_space_and_tf(vcs, &cs, &tf);
442 if (cs == COLOR_SPACE_UNKNOWN || tf == TRANSFER_FUNC_UNKNOWN)
443 return false;
444
445 if (vpe_is_fp16(format) && tf != TRANSFER_FUNC_LINEAR)
446 return false;
447
448 return true;
449 }
450
/* Report horizontal-mirror capability: VPE 1.0 mirrors on the output side
 * only; input mirroring is unsupported. Always returns true. */
bool vpe10_check_h_mirror_support(bool *input_mirror, bool *output_mirror)
{
    *output_mirror = true;
    *input_mirror = false;
    return true;
}
457
/* Validate the requested background color; delegates entirely to the
 * common helper. */
enum vpe_status vpe10_check_bg_color_support(struct vpe_priv* vpe_priv, struct vpe_color* bg_color)
{
    return vpe_is_valid_bg_color(vpe_priv, bg_color);
}
462
/* Derive the destination viewport, its chroma counterpart and the h/v
 * active sizes for one segment from its recout and the stream's dst rect.
 *
 * Stream 0 also owns the background: its v_active is stretched to the full
 * target height, and its first/last segments widen their viewport (up to
 * max_seg_width) to absorb background area left/right of the stream.
 * NOTE: the order of operations matters — dst_viewport.x/width are taken
 * from recout before recout.x/y are rewritten below.
 */
void vpe10_calculate_dst_viewport_and_active(
    struct segment_ctx *segment_ctx, uint32_t max_seg_width)
{
    struct scaler_data *data = &segment_ctx->scaler_data;
    struct stream_ctx *stream_ctx = segment_ctx->stream_ctx;
    struct vpe_priv *vpe_priv = stream_ctx->vpe_priv;
    struct vpe_rect *dst_rect = &stream_ctx->stream.scaling_info.dst_rect;
    struct vpe_rect *target_rect = &vpe_priv->output_ctx.target_rect;

    /* chroma divider: 2 for 4:2:0 output, 1 otherwise */
    uint32_t vpc_div = vpe_is_yuv420(vpe_priv->output_ctx.surface.format) ? 2 : 1;

    data->dst_viewport.x = data->recout.x + dst_rect->x;
    data->dst_viewport.width = data->recout.width;

    // 1st stream will cover the background
    // extends the v_active to cover the full target_rect's height
    if (stream_ctx->stream_idx == 0) {
        data->recout.x = 0;
        data->recout.y = dst_rect->y - target_rect->y;
        data->dst_viewport.y = target_rect->y;
        data->dst_viewport.height = target_rect->height;

        if (!stream_ctx->flip_horizonal_output) {
            /* first segment :
             * if the dst_viewport.width is not 1024,
             * and we need background on the left, extend the active to cover as much as it can
             */
            if (segment_ctx->segment_idx == 0) {
                /* extend left, bounded by segment capacity and by the target edge */
                uint32_t remain_gap = min(max_seg_width - data->dst_viewport.width,
                    (uint32_t)(data->dst_viewport.x - target_rect->x));
                data->recout.x = (int32_t)remain_gap;

                data->dst_viewport.x -= (int32_t)remain_gap;
                data->dst_viewport.width += remain_gap;
            }
            // last segment
            if (segment_ctx->segment_idx == stream_ctx->num_segments - 1) {
                /* extend right toward the target edge under the same capacity bound */
                uint32_t remain_gap = min(max_seg_width - data->dst_viewport.width,
                    (uint32_t)((target_rect->x + (int32_t)target_rect->width) -
                               (data->dst_viewport.x + (int32_t)data->dst_viewport.width)));

                data->dst_viewport.width += remain_gap;
            }
        }
    } else {
        /* non-background streams: viewport tracks recout exactly */
        data->dst_viewport.y = data->recout.y + dst_rect->y;
        data->dst_viewport.height = data->recout.height;
        data->recout.y = 0;
        data->recout.x = 0;
    }

    /* chroma viewport is the luma viewport scaled by the subsampling divider */
    data->dst_viewport_c.x = data->dst_viewport.x / (int32_t)vpc_div;
    data->dst_viewport_c.y = data->dst_viewport.y / (int32_t)vpc_div;
    data->dst_viewport_c.width = data->dst_viewport.width / vpc_div;
    data->dst_viewport_c.height = data->dst_viewport.height / vpc_div;

    // [h/v]_active
    data->h_active = data->dst_viewport.width;
    data->v_active = data->dst_viewport.height;
}
523
/* Split every stream into hardware segments, validate viewport sizes and
 * scaling ratios, build per-segment scaler parameters, then create the
 * background/gap segments and the final command info list.
 *
 * Returns the first failing status, or VPE_STATUS_OK.
 */
enum vpe_status vpe10_calculate_segments(
    struct vpe_priv *vpe_priv, const struct vpe_build_param *params)
{
    enum vpe_status res;
    struct vpe_rect *gaps;
    uint16_t gaps_cnt, max_gaps;
    uint16_t stream_idx, seg_idx;
    struct stream_ctx *stream_ctx;
    struct segment_ctx *segment_ctx;
    uint32_t max_seg_width = vpe_priv->pub.caps->plane_caps.max_viewport_width;
    struct scaler_data scl_data;
    struct vpe_rect *src_rect;
    struct vpe_rect *dst_rect;
    uint32_t factor;
    const uint32_t max_upscale_factor = vpe_priv->pub.caps->plane_caps.max_upscale_factor;
    const uint32_t max_downscale_factor = vpe_priv->pub.caps->plane_caps.max_downscale_factor;
    struct dpp *dpp = vpe_priv->resource.dpp[0];
    /* NOTE(review): max_lb_size (and dpp) appear unused in this function —
     * the line-buffer clamp happens in set_num_segments; confirm and prune. */
    const uint32_t max_lb_size = dpp->funcs->get_line_buffer_size();

    for (stream_idx = 0; stream_idx < vpe_priv->num_streams; stream_idx++) {
        stream_ctx = &vpe_priv->stream_ctx[stream_idx];
        src_rect = &stream_ctx->stream.scaling_info.src_rect;
        dst_rect = &stream_ctx->stream.scaling_info.dst_rect;

        /* reject undersized rects before clipping */
        if (src_rect->width < VPE_MIN_VIEWPORT_SIZE || src_rect->height < VPE_MIN_VIEWPORT_SIZE ||
            dst_rect->width < VPE_MIN_VIEWPORT_SIZE || dst_rect->height < VPE_MIN_VIEWPORT_SIZE) {
            return VPE_STATUS_VIEWPORT_SIZE_NOT_SUPPORTED;
        }

        vpe_clip_stream(src_rect, dst_rect, &params->target_rect);

        if (src_rect->width <= 0 || src_rect->height <= 0 || dst_rect->width <= 0 ||
            dst_rect->height <= 0) {
            vpe_log("calculate_segments: after clipping, src or dst rect contains no area. Skip "
                    "this stream.\n");
            stream_ctx->num_segments = 0;
            continue;
        }

        /* If the source frame size in either dimension is 1 then the scaling ratio becomes 0
         * in that dimension. If destination frame size in any dimension is 1 the scaling ratio
         * is NAN.  (Re-checked here because clipping above may have shrunk the rects.)
         */
        if (src_rect->width < VPE_MIN_VIEWPORT_SIZE || src_rect->height < VPE_MIN_VIEWPORT_SIZE ||
            dst_rect->width < VPE_MIN_VIEWPORT_SIZE || dst_rect->height < VPE_MIN_VIEWPORT_SIZE) {
            return VPE_STATUS_VIEWPORT_SIZE_NOT_SUPPORTED;
        }
        /* scale factor in units of 1/1000, checked against the caps table */
        factor = (uint32_t)vpe_fixpt_ceil(
            vpe_fixpt_from_fraction((1000 * dst_rect->width), src_rect->width));
        if (factor > max_upscale_factor || factor < max_downscale_factor)
            return VPE_STATUS_SCALING_RATIO_NOT_SUPPORTED;

        // initialize scaling data
        if (!vpe10_init_scaler_data(vpe_priv, stream_ctx, &scl_data, src_rect, dst_rect))
            return VPE_STATUS_SCALING_RATIO_NOT_SUPPORTED;

        /* may shrink max_seg_width based on line-buffer capacity */
        res = vpe_priv->resource.set_num_segments(
            vpe_priv, stream_ctx, &scl_data, src_rect, dst_rect, &max_seg_width);
        if (res != VPE_STATUS_OK)
            return res;

        for (seg_idx = 0; seg_idx < stream_ctx->num_segments; seg_idx++) {
            segment_ctx = &stream_ctx->segment_ctx[seg_idx];
            segment_ctx->segment_idx = seg_idx;
            segment_ctx->stream_ctx = stream_ctx;

            /* all segments of a stream share ratios/taps/coefficients */
            segment_ctx->scaler_data.ratios = scl_data.ratios;
            segment_ctx->scaler_data.taps = scl_data.taps;
            if (stream_ctx->stream.use_external_scaling_coeffs) {
                segment_ctx->scaler_data.polyphase_filter_coeffs =
                    &stream_ctx->stream.polyphase_scaling_coeffs;
            } else {
                segment_ctx->scaler_data.polyphase_filter_coeffs = 0;
            }
            res = vpe_resource_build_scaling_params(segment_ctx);
            if (res != VPE_STATUS_OK)
                return res;

            vpe_priv->resource.calculate_dst_viewport_and_active(segment_ctx, max_seg_width);
        }
    }

    /* If the stream width is less than max_seg_width - 1024, and it
     * lies inside a max_seg_width window of the background, vpe needs
     * an extra bg segment to store that.
       1     2    3     4    5
       |....|....|.**.|....|
       |....|....|.**.|....|
       |....|....|.**.|....|

       (*: stream
        .: background
        |: 1k separator)

     */
    max_seg_width = vpe_priv->pub.caps->plane_caps.max_viewport_width;
    max_gaps =
        (uint16_t)(max((params->target_rect.width + max_seg_width - 1) / max_seg_width, 1) + 1);
    gaps = vpe_zalloc(sizeof(struct vpe_rect) * max_gaps);
    if (!gaps)
        return VPE_STATUS_NO_MEMORY;

    gaps_cnt = vpe_priv->resource.find_bg_gaps(vpe_priv, &(params->target_rect), gaps, max_gaps);
    if (gaps_cnt > 0)
        vpe_priv->resource.create_bg_segments(vpe_priv, gaps, gaps_cnt, VPE_CMD_OPS_BG);

    if (gaps != NULL) {
        vpe_free(gaps);
        gaps = NULL;
    }

    /* output mirroring may reorder segments */
    vpe_handle_output_h_mirror(vpe_priv);

    res = vpe_priv->resource.populate_cmd_info(vpe_priv);

    if (res == VPE_STATUS_OK)
        res = vpe_create_visual_confirm_segs(vpe_priv, params, max_seg_width);

    return res;
}
644
/* Fill the OPP clamping parameters from the output surface's color range.
 *
 * Full-range output always clamps to full range. Studio (limited) range
 * picks a per-bit-depth limited clamp, either derived from the surface
 * color depth or — when debug.clamping_setting is set — overridden by the
 * debug clamping parameters (falling back to a fully programmable range
 * taken from output_ctx.clamping_params).
 */
static void build_clamping_params(
    struct opp *opp, struct clamping_and_pixel_encoding_params *clamping)
{
    struct vpe_priv *vpe_priv = opp->vpe_priv;
    struct vpe_surface_info *dst_surface = &vpe_priv->output_ctx.surface;
    enum vpe_color_range output_range = dst_surface->cs.range;

    memset(clamping, 0, sizeof(*clamping));
    clamping->clamping_level = CLAMPING_FULL_RANGE;
    clamping->c_depth = vpe_get_color_depth(dst_surface->format);
    if (output_range == VPE_COLOR_RANGE_STUDIO) {
        if (!vpe_priv->init.debug.clamping_setting) {
            /* normal path: clamp level follows the surface bit depth */
            switch (clamping->c_depth) {
            case COLOR_DEPTH_888:
                clamping->clamping_level = CLAMPING_LIMITED_RANGE_8BPC;
                break;
            case COLOR_DEPTH_101010:
                clamping->clamping_level = CLAMPING_LIMITED_RANGE_10BPC;
                break;
            case COLOR_DEPTH_121212:
                clamping->clamping_level = CLAMPING_LIMITED_RANGE_12BPC;
                break;
            default:
                clamping->clamping_level =
                    CLAMPING_FULL_RANGE; // for all the others bit depths set the full range
                break;
            }
        } else {
            /* debug override path */
            switch (vpe_priv->init.debug.clamping_params.clamping_range) {
            case VPE_CLAMPING_LIMITED_RANGE_8BPC:
                clamping->clamping_level = CLAMPING_LIMITED_RANGE_8BPC;
                break;
            case VPE_CLAMPING_LIMITED_RANGE_10BPC:
                clamping->clamping_level = CLAMPING_LIMITED_RANGE_10BPC;
                break;
            case VPE_CLAMPING_LIMITED_RANGE_12BPC:
                clamping->clamping_level = CLAMPING_LIMITED_RANGE_12BPC;
                break;
            default:
                clamping->clamping_level =
                    CLAMPING_LIMITED_RANGE_PROGRAMMABLE; // for all the others set to programmable
                                                         // range
                clamping->r_clamp_component_lower =
                    vpe_priv->output_ctx.clamping_params.r_clamp_component_lower;
                clamping->g_clamp_component_lower =
                    vpe_priv->output_ctx.clamping_params.g_clamp_component_lower;
                clamping->b_clamp_component_lower =
                    vpe_priv->output_ctx.clamping_params.b_clamp_component_lower;
                clamping->r_clamp_component_upper =
                    vpe_priv->output_ctx.clamping_params.r_clamp_component_upper;
                clamping->g_clamp_component_upper =
                    vpe_priv->output_ctx.clamping_params.g_clamp_component_upper;
                clamping->b_clamp_component_upper =
                    vpe_priv->output_ctx.clamping_params.b_clamp_component_upper;
                break;
            }
        }
    }
}
704
/* Program the front-end pipe (CDC surface/crossbar, DPP CNV/CSC/transfer
 * functions, MPC mux and movable CM, HDR multiplier) for one command input.
 *
 * When seg_only is false, the stream-shareable configuration is emitted
 * first and completed as its own config chunk; the segment-specific
 * viewport and scaler programming is always emitted afterwards.
 * Always returns 0.
 */
int32_t vpe10_program_frontend(struct vpe_priv *vpe_priv, uint32_t pipe_idx, uint32_t cmd_idx,
    uint32_t cmd_input_idx, bool seg_only)
{
    struct vpe_cmd_info *cmd_info = &vpe_priv->vpe_cmd_info[cmd_idx];
    struct vpe_cmd_input *cmd_input = &cmd_info->inputs[cmd_input_idx];
    struct stream_ctx *stream_ctx = &vpe_priv->stream_ctx[cmd_input->stream_idx];
    struct vpe_surface_info *surface_info = &stream_ctx->stream.surface_info;
    struct cdc *cdc = vpe_priv->resource.cdc[pipe_idx];
    struct dpp *dpp = vpe_priv->resource.dpp[pipe_idx];
    struct mpc *mpc = vpe_priv->resource.mpc[pipe_idx];
    enum input_csc_select select = INPUT_CSC_SELECT_BYPASS;
    uint32_t hw_mult = 0;
    struct custom_float_format fmt;

    vpe_priv->fe_cb_ctx.stream_idx = cmd_input->stream_idx;
    vpe_priv->fe_cb_ctx.vpe_priv = vpe_priv;

    config_writer_set_callback(
        &vpe_priv->config_writer, &vpe_priv->fe_cb_ctx, vpe_frontend_config_callback);

    config_writer_set_type(&vpe_priv->config_writer, CONFIG_TYPE_DIRECT);

    if (!seg_only) {
        /* start front-end programming that can be shared among segments */
        vpe_priv->fe_cb_ctx.stream_sharing = true;

        cdc->funcs->program_surface_config(cdc, surface_info->format, stream_ctx->stream.rotation,
            // set to false as h_mirror is not supported by input, only supported in output
            false, surface_info->swizzle);
        cdc->funcs->program_crossbar_config(cdc, surface_info->format);

        dpp->funcs->program_cnv(dpp, surface_info->format, vpe_priv->expansion_mode);
        if (stream_ctx->bias_scale)
            dpp->funcs->program_cnv_bias_scale(dpp, stream_ctx->bias_scale);

        /* If input adjustment exists, program the ICSC with those values. */
        if (stream_ctx->input_cs) {
            select = INPUT_CSC_SELECT_ICSC;
            dpp->funcs->program_post_csc(dpp, stream_ctx->cs, select, stream_ctx->input_cs);
        } else {
            dpp->funcs->program_post_csc(dpp, stream_ctx->cs, select, NULL);
        }
        dpp->funcs->program_input_transfer_func(dpp, stream_ctx->input_tf);
        dpp->funcs->program_gamut_remap(dpp, stream_ctx->gamut_remap);

        // for not bypass mode, we always are in single layer coming from DPP and output to OPP
        mpc->funcs->program_mpcc_mux(mpc, MPC_MPCCID_0, MPC_MUX_TOPSEL_DPP0, MPC_MUX_BOTSEL_DISABLE,
            MPC_MUX_OUTMUX_MPCC0, MPC_MUX_OPPID_OPP0);

        // program shaper, 3dlut and 1dlut in MPC for stream before blend
        mpc->funcs->program_movable_cm(
            mpc, stream_ctx->in_shaper_func, stream_ctx->lut3d_func, stream_ctx->blend_tf, false);

        // program hdr_mult
        /* custom float layout: sign + 6 exponent + 12 mantissa bits */
        fmt.exponenta_bits = 6;
        fmt.mantissa_bits = 12;
        fmt.sign = true;
        /* NOTE(review): this branch dereferences lut3d_func; presumably tone
         * mapping guarantees it is non-NULL when UID/enable_3dlut is set —
         * confirm against the tone-map setup path. */
        if (stream_ctx->stream.tm_params.UID || stream_ctx->stream.tm_params.enable_3dlut) {
            vpe_convert_to_custom_float_format(
                stream_ctx->lut3d_func->hdr_multiplier, &fmt, &hw_mult);
        } else {
            vpe_convert_to_custom_float_format(stream_ctx->white_point_gain, &fmt, &hw_mult);
        }
        dpp->funcs->set_hdr_multiplier(dpp, hw_mult);

        if (vpe_priv->init.debug.dpp_crc_ctrl)
            dpp->funcs->program_crc(dpp, true);

        if (vpe_priv->init.debug.mpc_crc_ctrl)
            mpc->funcs->program_crc(mpc, true);

        // put other hw programming for stream specific that can be shared here

        config_writer_complete(&vpe_priv->config_writer);
    }

    vpe10_create_stream_ops_config(vpe_priv, pipe_idx, stream_ctx, cmd_input, cmd_info->ops);

    /* start segment specific programming */
    vpe_priv->fe_cb_ctx.stream_sharing = false;
    vpe_priv->fe_cb_ctx.stream_op_sharing = false;
    vpe_priv->fe_cb_ctx.cmd_type = VPE_CMD_TYPE_COMPOSITING;

    cdc->funcs->program_viewport(
        cdc, &cmd_input->scaler_data.viewport, &cmd_input->scaler_data.viewport_c);

    dpp->funcs->set_segment_scaler(dpp, &cmd_input->scaler_data);

    config_writer_complete(&vpe_priv->config_writer);

    return 0;
}
797
/* Program the back-end pipe (CDC P2B/global sync, MPC output transfer and
 * gamut remap, OPP alpha/bypass/FMT) for one command.
 *
 * All back-end programming here is stream-shareable, so nothing is emitted
 * when seg_only is true. Always returns 0.
 */
int32_t vpe10_program_backend(
    struct vpe_priv *vpe_priv, uint32_t pipe_idx, uint32_t cmd_idx, bool seg_only)
{
    struct output_ctx *output_ctx = &vpe_priv->output_ctx;
    struct vpe_surface_info *surface_info = &vpe_priv->output_ctx.surface;

    struct cdc *cdc = vpe_priv->resource.cdc[pipe_idx];
    struct opp *opp = vpe_priv->resource.opp[pipe_idx];
    struct mpc *mpc = vpe_priv->resource.mpc[pipe_idx];

    struct bit_depth_reduction_params fmt_bit_depth;
    struct clamping_and_pixel_encoding_params clamp_param;
    enum color_depth display_color_depth;
    uint16_t alpha_16;
    bool opp_dig_bypass = false;

    vpe_priv->be_cb_ctx.vpe_priv = vpe_priv;
    config_writer_set_callback(
        &vpe_priv->config_writer, &vpe_priv->be_cb_ctx, vpe_backend_config_callback);

    config_writer_set_type(&vpe_priv->config_writer, CONFIG_TYPE_DIRECT);

    if (!seg_only) {
        /* start back-end programming that can be shared among segments */
        vpe_priv->be_cb_ctx.share = true;

        cdc->funcs->program_p2b_config(cdc, surface_info->format);
        cdc->funcs->program_global_sync(cdc, VPE10_CDC_VUPDATE_OFFSET_DEFAULT,
            VPE10_CDC_VUPDATE_WIDTH_DEFAULT, VPE10_CDC_VREADY_OFFSET_DEFAULT);

        mpc->funcs->set_output_transfer_func(mpc, output_ctx);
        // program shaper, 3dlut and 1dlut in MPC for after blend
        // Note: cannot program both before and after blend CM
        // caller should ensure only one is programmed
        // mpc->funcs->program_movable_cm(mpc, output_ctx->in_shaper_func,
        // output_ctx->lut3d_func, output_ctx->blend_tf, true);
        mpc->funcs->program_mpc_out(mpc, surface_info->format);

        // Post blend gamut remap
        mpc->funcs->set_gamut_remap(mpc, output_ctx->gamut_remap);

        /* pipe alpha: fp16 output carries alpha as a half-float (and bypasses
         * the OPP digital path); fixed-point formats use a 16-bit integer */
        if (vpe_is_fp16(surface_info->format)) {
            if (vpe_priv->output_ctx.alpha_mode == VPE_ALPHA_BGCOLOR)
                vpe_convert_from_float_to_fp16(
                    (double)vpe_priv->output_ctx.bg_color.rgba.a, &alpha_16);
            else
                vpe_convert_from_float_to_fp16(1.0, &alpha_16);

            opp_dig_bypass = true;
        } else {
            if (vpe_priv->output_ctx.alpha_mode == VPE_ALPHA_BGCOLOR)
                alpha_16 = (uint16_t)(vpe_priv->output_ctx.bg_color.rgba.a * 0xffff);
            else
                alpha_16 = 0xffff;
        }

        opp->funcs->program_pipe_alpha(opp, alpha_16);
        opp->funcs->program_pipe_bypass(opp, opp_dig_bypass);

        display_color_depth = vpe_get_color_depth(surface_info->format);
        build_clamping_params(opp, &clamp_param);
        vpe_resource_build_bit_depth_reduction_params(opp, &fmt_bit_depth);

        // disable dynamic expansion for now as no use case
        opp->funcs->set_dyn_expansion(opp, false, display_color_depth);
        opp->funcs->program_fmt(opp, &fmt_bit_depth, &clamp_param);
        if (vpe_priv->init.debug.opp_pipe_crc_ctrl)
            opp->funcs->program_pipe_crc(opp, true);

        config_writer_complete(&vpe_priv->config_writer);
    }

    return 0;
}
872
vpe10_populate_cmd_info(struct vpe_priv * vpe_priv)873 enum vpe_status vpe10_populate_cmd_info(struct vpe_priv *vpe_priv)
874 {
875 uint16_t stream_idx;
876 uint16_t segment_idx;
877 struct stream_ctx *stream_ctx;
878 struct vpe_cmd_info *cmd_info;
879 bool tm_enabled;
880
881 for (stream_idx = 0; stream_idx < vpe_priv->num_streams; stream_idx++) {
882 stream_ctx = &vpe_priv->stream_ctx[stream_idx];
883
884 tm_enabled = stream_ctx->stream.tm_params.UID != 0 || stream_ctx->stream.tm_params.enable_3dlut;
885
886 for (segment_idx = 0; segment_idx < stream_ctx->num_segments; segment_idx++) {
887 if (vpe_priv->num_vpe_cmds >= MAX_VPE_CMD) {
888 return VPE_STATUS_CMD_OVERFLOW_ERROR;
889 }
890
891 cmd_info = &vpe_priv->vpe_cmd_info[vpe_priv->num_vpe_cmds];
892 cmd_info->inputs[0].stream_idx = stream_idx;
893 cmd_info->cd = (uint8_t)(stream_ctx->num_segments - segment_idx - 1);
894 memcpy(&(cmd_info->inputs[0].scaler_data),
895 &(stream_ctx->segment_ctx[segment_idx].scaler_data), sizeof(struct scaler_data));
896 cmd_info->num_outputs = 1;
897 cmd_info->outputs[0].dst_viewport = stream_ctx->segment_ctx[segment_idx].scaler_data.dst_viewport;
898 cmd_info->outputs[0].dst_viewport_c =
899 stream_ctx->segment_ctx[segment_idx].scaler_data.dst_viewport_c;
900 cmd_info->num_inputs = 1;
901 cmd_info->ops = VPE_CMD_OPS_COMPOSITING;
902 cmd_info->tm_enabled = tm_enabled;
903 vpe_priv->num_vpe_cmds++;
904 cmd_info->insert_start_csync = false;
905 cmd_info->insert_end_csync = false;
906
907 // The following codes are only valid if blending is supported
908 /*
909 if (cmd_info->ops == VPE_CMD_OPS_BLENDING) {
910 if (cmd_info->cd == (stream_ctx->num_segments - 1)) {
911 cmd_info->insert_start_csync = true;
912 }
913
914 if (cmd_info->cd == 0) {
915 cmd_info->insert_end_csync = true;
916 }
917 }
918 */
919 }
920 }
921
922 return VPE_STATUS_OK;
923 }
924
vpe10_create_stream_ops_config(struct vpe_priv * vpe_priv,uint32_t pipe_idx,struct stream_ctx * stream_ctx,struct vpe_cmd_input * cmd_input,enum vpe_cmd_ops ops)925 void vpe10_create_stream_ops_config(struct vpe_priv *vpe_priv, uint32_t pipe_idx,
926 struct stream_ctx *stream_ctx, struct vpe_cmd_input *cmd_input, enum vpe_cmd_ops ops)
927 {
928 /* put all hw programming that can be shared according to the cmd type within a stream here */
929 struct mpcc_blnd_cfg blndcfg = {0};
930 struct dpp *dpp = vpe_priv->resource.dpp[pipe_idx];
931 struct mpc *mpc = vpe_priv->resource.mpc[pipe_idx];
932 enum vpe_cmd_type cmd_type = VPE_CMD_TYPE_COUNT;
933
934 vpe_priv->fe_cb_ctx.stream_op_sharing = true;
935 vpe_priv->fe_cb_ctx.stream_sharing = false;
936
937 if (ops == VPE_CMD_OPS_BG) {
938 cmd_type = VPE_CMD_TYPE_BG;
939 } else if (ops == VPE_CMD_OPS_COMPOSITING) {
940 cmd_type = VPE_CMD_TYPE_COMPOSITING;
941 } else if (ops == VPE_CMD_OPS_BG_VSCF_INPUT) {
942 cmd_type = VPE_CMD_TYPE_BG_VSCF_INPUT;
943 } else if (ops == VPE_CMD_OPS_BG_VSCF_OUTPUT) {
944 cmd_type = VPE_CMD_TYPE_BG_VSCF_OUTPUT;
945 } else
946 return;
947
948 // return if already generated
949 if (stream_ctx->num_stream_op_configs[cmd_type])
950 return;
951
952 vpe_priv->fe_cb_ctx.cmd_type = cmd_type;
953
954 dpp->funcs->set_frame_scaler(dpp, &cmd_input->scaler_data);
955
956 if (ops == VPE_CMD_OPS_BG_VSCF_INPUT) {
957 blndcfg.bg_color = vpe_get_visual_confirm_color(stream_ctx->stream.surface_info.format,
958 stream_ctx->stream.surface_info.cs, vpe_priv->output_ctx.cs,
959 vpe_priv->output_ctx.output_tf, vpe_priv->output_ctx.surface.format,
960 (stream_ctx->stream.tm_params.UID != 0 || stream_ctx->stream.tm_params.enable_3dlut));
961 } else if (ops == VPE_CMD_OPS_BG_VSCF_OUTPUT) {
962 blndcfg.bg_color = vpe_get_visual_confirm_color(vpe_priv->output_ctx.surface.format,
963 vpe_priv->output_ctx.surface.cs, vpe_priv->output_ctx.cs,
964 vpe_priv->output_ctx.output_tf, vpe_priv->output_ctx.surface.format,
965 false); // 3DLUT should only affect input visual confirm
966 } else {
967 blndcfg.bg_color = vpe_priv->output_ctx.bg_color;
968 }
969 blndcfg.global_gain = 0xff;
970 blndcfg.pre_multiplied_alpha = false;
971
972 if (stream_ctx->stream.blend_info.blending) {
973 if (stream_ctx->per_pixel_alpha) {
974 blndcfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA_COMBINED_GLOBAL_GAIN;
975
976 blndcfg.pre_multiplied_alpha = stream_ctx->stream.blend_info.pre_multiplied_alpha;
977 if (stream_ctx->stream.blend_info.global_alpha)
978 blndcfg.global_gain =
979 (uint8_t)(stream_ctx->stream.blend_info.global_alpha_value * 0xff);
980 } else {
981 blndcfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA;
982 if (stream_ctx->stream.blend_info.global_alpha == true) {
983 VPE_ASSERT(stream_ctx->stream.blend_info.global_alpha_value <= 1.0f);
984 blndcfg.global_alpha =
985 (uint8_t)(stream_ctx->stream.blend_info.global_alpha_value * 0xff);
986 } else {
987 // Global alpha not enabled, make top layer opaque
988 blndcfg.global_alpha = 0xff;
989 }
990 }
991 } else {
992 blndcfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA;
993 blndcfg.global_alpha = 0xff;
994 }
995
996 if (cmd_type == VPE_CMD_TYPE_BG || cmd_type == VPE_CMD_TYPE_BG_VSCF_INPUT ||
997 cmd_type == VPE_CMD_TYPE_BG_VSCF_OUTPUT) {
998 // for bg commands, make top layer transparent
999 // as global alpha only works when global alpha mode, set global alpha mode as well
1000 blndcfg.global_alpha = 0;
1001 blndcfg.global_gain = 0xff;
1002 blndcfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA;
1003 }
1004
1005 blndcfg.overlap_only = false;
1006 blndcfg.bottom_gain_mode = 0;
1007
1008 switch (vpe_priv->init.debug.bg_bit_depth) {
1009 case 8:
1010 blndcfg.background_color_bpc = 0;
1011 break;
1012 case 9:
1013 blndcfg.background_color_bpc = 1;
1014 break;
1015 case 10:
1016 blndcfg.background_color_bpc = 2;
1017 break;
1018 case 11:
1019 blndcfg.background_color_bpc = 3;
1020 break;
1021 case 12:
1022 default:
1023 blndcfg.background_color_bpc = 4; // 12 bit. DAL's choice;
1024 break;
1025 }
1026
1027 blndcfg.top_gain = 0x1f000;
1028 blndcfg.bottom_inside_gain = 0x1f000;
1029 blndcfg.bottom_outside_gain = 0x1f000;
1030
1031 mpc->funcs->program_mpcc_blending(mpc, MPC_MPCCID_0, &blndcfg);
1032
1033 config_writer_complete(&vpe_priv->config_writer);
1034 }
1035
/* Worst-case buffer usage constants consumed by vpe10_get_bufs_req() when
 * sizing the command and embedded buffers; the "currently max" figures are
 * measured upper bounds with headroom on top. */
#define VPE10_GENERAL_VPE_DESC_SIZE 64 // 4 * (4 + (2 * num_configs))
#define VPE10_GENERAL_EMB_USAGE_FRAME_SHARED 6000 // currently max 4804 is recorded
#define VPE10_GENERAL_EMB_USAGE_3DLUT_FRAME_SHARED 40960 // currently max 35192 is recorded
#define VPE10_GENERAL_EMB_USAGE_BG_SHARED 2400 // currently max 1772 + 92 + 72 = 1936 is recorded
#define VPE10_GENERAL_EMB_USAGE_SEG_NON_SHARED \
    240 // segment specific config + plane descriptor size. currently max 92 + 72 = 164 is recorded.
1042
vpe10_get_bufs_req(struct vpe_priv * vpe_priv,struct vpe_bufs_req * req)1043 void vpe10_get_bufs_req(struct vpe_priv *vpe_priv, struct vpe_bufs_req *req)
1044 {
1045 uint32_t i;
1046 struct vpe_cmd_info *cmd_info;
1047 uint32_t stream_idx = 0xFFFFFFFF;
1048 uint64_t emb_req = 0;
1049 bool have_visual_confirm_input = false;
1050 bool have_visual_confirm_output = false;
1051
1052 req->cmd_buf_size = 0;
1053 req->emb_buf_size = 0;
1054
1055 for (i = 0; i < vpe_priv->num_vpe_cmds; i++) {
1056 cmd_info = &vpe_priv->vpe_cmd_info[i];
1057
1058 // each cmd consumes one VPE descriptor
1059 req->cmd_buf_size += VPE10_GENERAL_VPE_DESC_SIZE;
1060
1061 // if a command represents the first segment of a stream,
1062 // total amount of config sizes is added, but for other segments
1063 // just the segment specific config size is added
1064 if (cmd_info->ops == VPE_CMD_OPS_COMPOSITING) {
1065 if (stream_idx != cmd_info->inputs[0].stream_idx) {
1066 emb_req = cmd_info->tm_enabled ? VPE10_GENERAL_EMB_USAGE_3DLUT_FRAME_SHARED
1067 : VPE10_GENERAL_EMB_USAGE_FRAME_SHARED;
1068 stream_idx = cmd_info->inputs[0].stream_idx;
1069 } else {
1070 emb_req = VPE10_GENERAL_EMB_USAGE_SEG_NON_SHARED;
1071 }
1072 } else if (cmd_info->ops == VPE_CMD_OPS_BG) {
1073 emb_req =
1074 i > 0 ? VPE10_GENERAL_EMB_USAGE_SEG_NON_SHARED : VPE10_GENERAL_EMB_USAGE_BG_SHARED;
1075 } else if (cmd_info->ops == VPE_CMD_OPS_BG_VSCF_INPUT) {
1076 emb_req = have_visual_confirm_input ? VPE10_GENERAL_EMB_USAGE_SEG_NON_SHARED
1077 : VPE10_GENERAL_EMB_USAGE_BG_SHARED;
1078 have_visual_confirm_input = true;
1079 } else if (cmd_info->ops == VPE_CMD_OPS_BG_VSCF_OUTPUT) {
1080 emb_req = have_visual_confirm_output ? VPE10_GENERAL_EMB_USAGE_SEG_NON_SHARED
1081 : VPE10_GENERAL_EMB_USAGE_BG_SHARED;
1082 have_visual_confirm_output = true;
1083 } else {
1084 VPE_ASSERT(0);
1085 }
1086
1087 req->emb_buf_size += emb_req;
1088 }
1089 }
1090