// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2022 MediaTek Inc.
 * Author: Ping-Hsun Wu <[email protected]>
 */

#include <linux/clk.h>
#include <linux/of_platform.h>
#include <linux/of_address.h>
#include <linux/pm_runtime.h>
#include "mtk-mdp3-cfg.h"
#include "mtk-mdp3-comp.h"
#include "mtk-mdp3-core.h"
#include "mtk-mdp3-regs.h"

#include "mdp_reg_aal.h"
#include "mdp_reg_ccorr.h"
#include "mdp_reg_color.h"
#include "mdp_reg_fg.h"
#include "mdp_reg_hdr.h"
#include "mdp_reg_merge.h"
#include "mdp_reg_ovl.h"
#include "mdp_reg_pad.h"
#include "mdp_reg_rdma.h"
#include "mdp_reg_rsz.h"
#include "mdp_reg_tdshp.h"
#include "mdp_reg_wdma.h"
#include "mdp_reg_wrot.h"

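/*
 * Per-type alias ID bookkeeping and the platform ID (p_id) that CFG_CHECK()
 * tests register layouts against.
 */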
static u32 mdp_comp_alias_id[MDP_COMP_TYPE_COUNT];
static int p_id;

static inline const struct mdp_platform_config *
__get_plat_cfg(const struct mdp_comp_ctx *ctx)
{
	if (!ctx)
		return NULL;

	return ctx->comp->mdp_dev->mdp_data->mdp_cfg;
}

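/*
 * Return the set of inner component IDs occupied by this context as a
 * bitmask. On SoCs where RDMA0 shares its SRAM with RSZ1, claiming RDMA0
 * claims RSZ1 as well.
 */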
static s64 get_comp_flag(const struct mdp_comp_ctx *ctx)
{
	const struct mdp_platform_config *mdp_cfg = __get_plat_cfg(ctx);
	u32 rdma0, rsz1;

	rdma0 = mdp_cfg_get_id_inner(ctx->comp->mdp_dev, MDP_COMP_RDMA0);
	rsz1 = mdp_cfg_get_id_inner(ctx->comp->mdp_dev, MDP_COMP_RSZ1);
	if (!rdma0 || !rsz1)
		return MDP_COMP_NONE;

	if (mdp_cfg && mdp_cfg->rdma_rsz1_sram_sharing)
		if (ctx->comp->inner_id == rdma0)
			return BIT(rdma0) | BIT(rsz1);

	return BIT(ctx->comp->inner_id);
}

static int init_rdma(struct mdp_comp_ctx *ctx, struct mdp_cmdq_cmd *cmd)
{
	const struct mdp_platform_config *mdp_cfg = __get_plat_cfg(ctx);
	phys_addr_t base = ctx->comp->reg_base;
	u8 subsys_id = ctx->comp->subsys_id;
	s32 rdma0;

	rdma0 = mdp_cfg_get_id_inner(ctx->comp->mdp_dev, MDP_COMP_RDMA0);
	if (!rdma0)
		return -EINVAL;

	if (mdp_cfg && mdp_cfg->rdma_support_10bit) {
		struct mdp_comp *prz1 = ctx->comp->mdp_dev->comp[MDP_COMP_RSZ1];

		/* Disable RSZ1 */
		if (ctx->comp->inner_id == rdma0 && prz1)
			MM_REG_WRITE_MASK(cmd, subsys_id, prz1->reg_base,
					  PRZ_ENABLE, 0x0, BIT(0));
	}

	/* Reset RDMA */
	MM_REG_WRITE_MASK(cmd, subsys_id, base, MDP_RDMA_RESET, BIT(0), BIT(0));
	MM_REG_POLL_MASK(cmd, subsys_id, base, MDP_RDMA_MON_STA_1, BIT(8), BIT(8));
	MM_REG_WRITE_MASK(cmd, subsys_id, base, MDP_RDMA_RESET, 0x0, BIT(0));
	return 0;
}

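/*
 * Frame-level RDMA setup. CFG_CHECK() selects the per-SoC parameter layout
 * at run time, so each register value below is read from the matching
 * platform config struct before being written out through the command queue.
 */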
static int config_rdma_frame(struct mdp_comp_ctx *ctx,
			     struct mdp_cmdq_cmd *cmd,
			     const struct v4l2_rect *compose)
{
	const struct mdp_platform_config *mdp_cfg = __get_plat_cfg(ctx);
	u32 colorformat = ctx->input->buffer.format.colorformat;
	bool block10bit = MDP_COLOR_IS_10BIT_PACKED(colorformat);
	bool en_ufo = MDP_COLOR_IS_UFP(colorformat);
	phys_addr_t base = ctx->comp->reg_base;
	u8 subsys_id = ctx->comp->subsys_id;
	u32 rdma_con_mask = 0;
	u32 reg = 0;

	if (mdp_cfg && mdp_cfg->rdma_support_10bit) {
		if (block10bit)
			MM_REG_WRITE_MASK(cmd, subsys_id, base,
					  MDP_RDMA_RESV_DUMMY_0, 0x7, 0x7);
		else
			MM_REG_WRITE_MASK(cmd, subsys_id, base,
					  MDP_RDMA_RESV_DUMMY_0, 0x0, 0x7);
	}

	/* Setup smi control */
	MM_REG_WRITE_MASK(cmd, subsys_id, base, MDP_RDMA_GMCIF_CON,
			  (7 << 4) +  /* burst type to 8 */
			  (1 << 16),  /* enable pre-ultra */
			  0x00030071);

	/* Setup source frame info */
	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, rdma.src_ctrl);
	else if (CFG_CHECK(MT8195, p_id))
		reg = CFG_COMP(MT8195, ctx->param, rdma.src_ctrl);
	MM_REG_WRITE_MASK(cmd, subsys_id, base, MDP_RDMA_SRC_CON, reg, 0x03C8FE0F);

	if (mdp_cfg)
		if (mdp_cfg->rdma_support_10bit && en_ufo) {
			/* Setup source buffer base */
			if (CFG_CHECK(MT8183, p_id))
				reg = CFG_COMP(MT8183, ctx->param, rdma.ufo_dec_y);
			else if (CFG_CHECK(MT8195, p_id))
				reg = CFG_COMP(MT8195, ctx->param, rdma.ufo_dec_y);
			MM_REG_WRITE(cmd, subsys_id, base,
				     MDP_RDMA_UFO_DEC_LENGTH_BASE_Y, reg);

			if (CFG_CHECK(MT8183, p_id))
				reg = CFG_COMP(MT8183, ctx->param, rdma.ufo_dec_c);
			else if (CFG_CHECK(MT8195, p_id))
				reg = CFG_COMP(MT8195, ctx->param, rdma.ufo_dec_c);
			MM_REG_WRITE(cmd, subsys_id, base,
				     MDP_RDMA_UFO_DEC_LENGTH_BASE_C, reg);

			/* Set 10bit source frame pitch */
			if (block10bit) {
				if (CFG_CHECK(MT8183, p_id))
					reg = CFG_COMP(MT8183, ctx->param, rdma.mf_bkgd_in_pxl);
				else if (CFG_CHECK(MT8195, p_id))
					reg = CFG_COMP(MT8195, ctx->param, rdma.mf_bkgd_in_pxl);
				MM_REG_WRITE_MASK(cmd, subsys_id, base,
						  MDP_RDMA_MF_BKGD_SIZE_IN_PXL,
						  reg, 0x001FFFFF);
			}
		}

	if (CFG_CHECK(MT8183, p_id)) {
		reg = CFG_COMP(MT8183, ctx->param, rdma.control);
		rdma_con_mask = 0x1110;
	} else if (CFG_CHECK(MT8195, p_id)) {
		reg = CFG_COMP(MT8195, ctx->param, rdma.control);
		rdma_con_mask = 0x1130;
	}
	MM_REG_WRITE_MASK(cmd, subsys_id, base, MDP_RDMA_CON, reg, rdma_con_mask);

	/* Setup source buffer base */
	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, rdma.iova[0]);
	else if (CFG_CHECK(MT8195, p_id))
		reg = CFG_COMP(MT8195, ctx->param, rdma.iova[0]);
	MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_SRC_BASE_0, reg);

	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, rdma.iova[1]);
	else if (CFG_CHECK(MT8195, p_id))
		reg = CFG_COMP(MT8195, ctx->param, rdma.iova[1]);
	MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_SRC_BASE_1, reg);

	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, rdma.iova[2]);
	else if (CFG_CHECK(MT8195, p_id))
		reg = CFG_COMP(MT8195, ctx->param, rdma.iova[2]);
	MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_SRC_BASE_2, reg);

	/* Setup source buffer end */
	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, rdma.iova_end[0]);
	else if (CFG_CHECK(MT8195, p_id))
		reg = CFG_COMP(MT8195, ctx->param, rdma.iova_end[0]);
	MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_SRC_END_0, reg);

	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, rdma.iova_end[1]);
	else if (CFG_CHECK(MT8195, p_id))
		reg = CFG_COMP(MT8195, ctx->param, rdma.iova_end[1]);
	MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_SRC_END_1, reg);

	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, rdma.iova_end[2]);
	else if (CFG_CHECK(MT8195, p_id))
		reg = CFG_COMP(MT8195, ctx->param, rdma.iova_end[2]);
	MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_SRC_END_2, reg);

	/* Setup source frame pitch */
	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, rdma.mf_bkgd);
	else if (CFG_CHECK(MT8195, p_id))
		reg = CFG_COMP(MT8195, ctx->param, rdma.mf_bkgd);
	MM_REG_WRITE_MASK(cmd, subsys_id, base, MDP_RDMA_MF_BKGD_SIZE_IN_BYTE,
			  reg, 0x001FFFFF);

	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, rdma.sf_bkgd);
	else if (CFG_CHECK(MT8195, p_id))
		reg = CFG_COMP(MT8195, ctx->param, rdma.sf_bkgd);
	MM_REG_WRITE_MASK(cmd, subsys_id, base, MDP_RDMA_SF_BKGD_SIZE_IN_BYTE,
			  reg, 0x001FFFFF);

	/* Setup color transform */
	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, rdma.transform);
	else if (CFG_CHECK(MT8195, p_id))
		reg = CFG_COMP(MT8195, ctx->param, rdma.transform);
	MM_REG_WRITE_MASK(cmd, subsys_id, base, MDP_RDMA_TRANSFORM_0,
			  reg, 0x0F110000);

	if (!mdp_cfg || !mdp_cfg->rdma_esl_setting)
		goto rdma_config_done;

	if (CFG_CHECK(MT8195, p_id))
		reg = CFG_COMP(MT8195, ctx->param, rdma.dmabuf_con0);
	MM_REG_WRITE_MASK(cmd, subsys_id, base, MDP_RDMA_DMABUF_CON_0,
			  reg, 0x0FFF00FF);

	if (CFG_CHECK(MT8195, p_id))
		reg = CFG_COMP(MT8195, ctx->param, rdma.ultra_th_high_con0);
	MM_REG_WRITE_MASK(cmd, subsys_id, base, MDP_RDMA_ULTRA_TH_HIGH_CON_0,
			  reg, 0x3FFFFFFF);

	if (CFG_CHECK(MT8195, p_id))
		reg = CFG_COMP(MT8195, ctx->param, rdma.ultra_th_low_con0);
	MM_REG_WRITE_MASK(cmd, subsys_id, base, MDP_RDMA_ULTRA_TH_LOW_CON_0,
			  reg, 0x3FFFFFFF);

	if (CFG_CHECK(MT8195, p_id))
		reg = CFG_COMP(MT8195, ctx->param, rdma.dmabuf_con1);
	MM_REG_WRITE_MASK(cmd, subsys_id, base, MDP_RDMA_DMABUF_CON_1,
			  reg, 0x0F7F007F);

	if (CFG_CHECK(MT8195, p_id))
		reg = CFG_COMP(MT8195, ctx->param, rdma.ultra_th_high_con1);
	MM_REG_WRITE_MASK(cmd, subsys_id, base, MDP_RDMA_ULTRA_TH_HIGH_CON_1,
			  reg, 0x3FFFFFFF);

	if (CFG_CHECK(MT8195, p_id))
		reg = CFG_COMP(MT8195, ctx->param, rdma.ultra_th_low_con1);
	MM_REG_WRITE_MASK(cmd, subsys_id, base, MDP_RDMA_ULTRA_TH_LOW_CON_1,
			  reg, 0x3FFFFFFF);

	if (CFG_CHECK(MT8195, p_id))
		reg = CFG_COMP(MT8195, ctx->param, rdma.dmabuf_con2);
	MM_REG_WRITE_MASK(cmd, subsys_id, base, MDP_RDMA_DMABUF_CON_2,
			  reg, 0x0F3F003F);

	if (CFG_CHECK(MT8195, p_id))
		reg = CFG_COMP(MT8195, ctx->param, rdma.ultra_th_high_con2);
	MM_REG_WRITE_MASK(cmd, subsys_id, base, MDP_RDMA_ULTRA_TH_HIGH_CON_2,
			  reg, 0x3FFFFFFF);

	if (CFG_CHECK(MT8195, p_id))
		reg = CFG_COMP(MT8195, ctx->param, rdma.ultra_th_low_con2);
	MM_REG_WRITE_MASK(cmd, subsys_id, base, MDP_RDMA_ULTRA_TH_LOW_CON_2,
			  reg, 0x3FFFFFFF);

	if (CFG_CHECK(MT8195, p_id))
		reg = CFG_COMP(MT8195, ctx->param, rdma.dmabuf_con3);
	MM_REG_WRITE_MASK(cmd, subsys_id, base, MDP_RDMA_DMABUF_CON_3,
			  reg, 0x0F3F003F);

rdma_config_done:
	return 0;
}

static int config_rdma_subfrm(struct mdp_comp_ctx *ctx,
			      struct mdp_cmdq_cmd *cmd, u32 index)
{
	const struct mdp_platform_config *mdp_cfg = __get_plat_cfg(ctx);
	u32 colorformat = ctx->input->buffer.format.colorformat;
	bool block10bit = MDP_COLOR_IS_10BIT_PACKED(colorformat);
	bool en_ufo = MDP_COLOR_IS_UFP(colorformat);
	phys_addr_t base = ctx->comp->reg_base;
	u8 subsys_id = ctx->comp->subsys_id;
	u32 csf_l = 0, csf_r = 0;
	u32 reg = 0;

	/* Enable RDMA */
	MM_REG_WRITE_MASK(cmd, subsys_id, base, MDP_RDMA_EN, BIT(0), BIT(0));

	/* Set Y pixel offset */
	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, rdma.subfrms[index].offset[0]);
	else if (CFG_CHECK(MT8195, p_id))
		reg = CFG_COMP(MT8195, ctx->param, rdma.subfrms[index].offset[0]);
	MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_SRC_OFFSET_0, reg);

	/* Set 10bit UFO mode */
	if (mdp_cfg) {
		if (mdp_cfg->rdma_support_10bit && block10bit && en_ufo) {
			if (CFG_CHECK(MT8183, p_id))
				reg = CFG_COMP(MT8183, ctx->param, rdma.subfrms[index].offset_0_p);
			else if (CFG_CHECK(MT8195, p_id))
				reg = CFG_COMP(MT8195, ctx->param, rdma.subfrms[index].offset_0_p);
			MM_REG_WRITE(cmd, subsys_id, base,
				     MDP_RDMA_SRC_OFFSET_0_P, reg);
		}
	}

	/* Set U pixel offset */
	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, rdma.subfrms[index].offset[1]);
	else if (CFG_CHECK(MT8195, p_id))
		reg = CFG_COMP(MT8195, ctx->param, rdma.subfrms[index].offset[1]);
	MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_SRC_OFFSET_1, reg);

	/* Set V pixel offset */
	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, rdma.subfrms[index].offset[2]);
	else if (CFG_CHECK(MT8195, p_id))
		reg = CFG_COMP(MT8195, ctx->param, rdma.subfrms[index].offset[2]);
	MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_SRC_OFFSET_2, reg);

	/* Set source size */
	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, rdma.subfrms[index].src);
	else if (CFG_CHECK(MT8195, p_id))
		reg = CFG_COMP(MT8195, ctx->param, rdma.subfrms[index].src);
	MM_REG_WRITE_MASK(cmd, subsys_id, base, MDP_RDMA_MF_SRC_SIZE, reg,
			  0x1FFF1FFF);

	/* Set target size */
	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, rdma.subfrms[index].clip);
	else if (CFG_CHECK(MT8195, p_id))
		reg = CFG_COMP(MT8195, ctx->param, rdma.subfrms[index].clip);
	MM_REG_WRITE_MASK(cmd, subsys_id, base, MDP_RDMA_MF_CLIP_SIZE,
			  reg, 0x1FFF1FFF);

	/* Set crop offset */
	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, rdma.subfrms[index].clip_ofst);
	else if (CFG_CHECK(MT8195, p_id))
		reg = CFG_COMP(MT8195, ctx->param, rdma.subfrms[index].clip_ofst);
	MM_REG_WRITE_MASK(cmd, subsys_id, base, MDP_RDMA_MF_OFFSET_1,
			  reg, 0x003F001F);

	if (CFG_CHECK(MT8183, p_id)) {
		csf_l = CFG_COMP(MT8183, ctx->param, subfrms[index].in.left);
		csf_r = CFG_COMP(MT8183, ctx->param, subfrms[index].in.right);
	} else if (CFG_CHECK(MT8195, p_id)) {
		csf_l = CFG_COMP(MT8195, ctx->param, subfrms[index].in.left);
		csf_r = CFG_COMP(MT8195, ctx->param, subfrms[index].in.right);
	}
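	/*
	 * On SoCs limited to repeat-mode upsampling, flag tiles wider than
	 * 320 pixels through RESV_DUMMY_0 bit 2.
	 */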
	if (mdp_cfg && mdp_cfg->rdma_upsample_repeat_only)
		if ((csf_r - csf_l + 1) > 320)
			MM_REG_WRITE_MASK(cmd, subsys_id, base,
					  MDP_RDMA_RESV_DUMMY_0, BIT(2), BIT(2));

	return 0;
}

static int wait_rdma_event(struct mdp_comp_ctx *ctx, struct mdp_cmdq_cmd *cmd)
{
	const struct mdp_platform_config *mdp_cfg = __get_plat_cfg(ctx);
	struct device *dev = &ctx->comp->mdp_dev->pdev->dev;
	phys_addr_t base = ctx->comp->reg_base;
	u8 subsys_id = ctx->comp->subsys_id;

	if (!mdp_cfg)
		return -EINVAL;

	if (ctx->comp->alias_id >= mdp_cfg->rdma_event_num) {
		dev_err(dev, "Invalid RDMA event %d\n", ctx->comp->alias_id);
		return -EINVAL;
	}

	MM_REG_WAIT(cmd, ctx->comp->gce_event[MDP_GCE_EVENT_EOF]);

	/* Disable RDMA */
	MM_REG_WRITE_MASK(cmd, subsys_id, base, MDP_RDMA_EN, 0x0, BIT(0));
	return 0;
}

static const struct mdp_comp_ops rdma_ops = {
	.get_comp_flag = get_comp_flag,
	.init_comp = init_rdma,
	.config_frame = config_rdma_frame,
	.config_subfrm = config_rdma_subfrm,
	.wait_comp_event = wait_rdma_event,
};

static int init_rsz(struct mdp_comp_ctx *ctx, struct mdp_cmdq_cmd *cmd)
{
	phys_addr_t base = ctx->comp->reg_base;
	u8 subsys_id = ctx->comp->subsys_id;

	/* Reset RSZ */
	MM_REG_WRITE_MASK(cmd, subsys_id, base, PRZ_ENABLE, 0x10000, BIT(16));
	MM_REG_WRITE_MASK(cmd, subsys_id, base, PRZ_ENABLE, 0x0, BIT(16));
	/* Enable RSZ */
	MM_REG_WRITE_MASK(cmd, subsys_id, base, PRZ_ENABLE, BIT(0), BIT(0));

	if (CFG_CHECK(MT8195, p_id)) {
		struct device *dev;

		dev = ctx->comp->mdp_dev->mm_subsys[MDP_MM_SUBSYS_1].mmsys;
		mtk_mmsys_vpp_rsz_dcm_config(dev, true, NULL);
	}

	return 0;
}

static int config_rsz_frame(struct mdp_comp_ctx *ctx,
			    struct mdp_cmdq_cmd *cmd,
			    const struct v4l2_rect *compose)
{
	const struct mdp_platform_config *mdp_cfg = __get_plat_cfg(ctx);
	phys_addr_t base = ctx->comp->reg_base;
	u8 subsys_id = ctx->comp->subsys_id;
	bool bypass = false;
	u32 reg = 0;

	if (mdp_cfg && mdp_cfg->rsz_etc_control)
		MM_REG_WRITE(cmd, subsys_id, base, RSZ_ETC_CONTROL, 0x0);

	if (CFG_CHECK(MT8183, p_id))
		bypass = CFG_COMP(MT8183, ctx->param, frame.bypass);
	else if (CFG_CHECK(MT8195, p_id))
		bypass = CFG_COMP(MT8195, ctx->param, frame.bypass);

	if (bypass) {
		/* Disable RSZ */
		MM_REG_WRITE_MASK(cmd, subsys_id, base, PRZ_ENABLE, 0x0, BIT(0));
		return 0;
	}

	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, rsz.control1);
	else if (CFG_CHECK(MT8195, p_id))
		reg = CFG_COMP(MT8195, ctx->param, rsz.control1);
	MM_REG_WRITE_MASK(cmd, subsys_id, base, PRZ_CONTROL_1, reg, 0x03FFFDF3);

	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, rsz.control2);
	else if (CFG_CHECK(MT8195, p_id))
		reg = CFG_COMP(MT8195, ctx->param, rsz.control2);
	MM_REG_WRITE_MASK(cmd, subsys_id, base, PRZ_CONTROL_2, reg, 0x0FFFC290);

	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, rsz.coeff_step_x);
	else if (CFG_CHECK(MT8195, p_id))
		reg = CFG_COMP(MT8195, ctx->param, rsz.coeff_step_x);
	MM_REG_WRITE_MASK(cmd, subsys_id, base, PRZ_HORIZONTAL_COEFF_STEP, reg,
			  0x007FFFFF);

	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, rsz.coeff_step_y);
	else if (CFG_CHECK(MT8195, p_id))
		reg = CFG_COMP(MT8195, ctx->param, rsz.coeff_step_y);
	MM_REG_WRITE_MASK(cmd, subsys_id, base, PRZ_VERTICAL_COEFF_STEP, reg,
			  0x007FFFFF);

	return 0;
}

static int config_rsz_subfrm(struct mdp_comp_ctx *ctx,
			     struct mdp_cmdq_cmd *cmd, u32 index)
{
	const struct mdp_platform_config *mdp_cfg = __get_plat_cfg(ctx);
	phys_addr_t base = ctx->comp->reg_base;
	u8 subsys_id = ctx->comp->subsys_id;
	u32 csf_l = 0, csf_r = 0;
	u32 reg = 0;
	u32 id;

	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, rsz.subfrms[index].control2);
	else if (CFG_CHECK(MT8195, p_id))
		reg = CFG_COMP(MT8195, ctx->param, rsz.subfrms[index].control2);
	MM_REG_WRITE_MASK(cmd, subsys_id, base, PRZ_CONTROL_2, reg, 0x00003800);

	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, rsz.subfrms[index].src);
	else if (CFG_CHECK(MT8195, p_id))
		reg = CFG_COMP(MT8195, ctx->param, rsz.subfrms[index].src);
	MM_REG_WRITE(cmd, subsys_id, base, PRZ_INPUT_IMAGE, reg);

	if (CFG_CHECK(MT8183, p_id)) {
		csf_l = CFG_COMP(MT8183, ctx->param, subfrms[index].in.left);
		csf_r = CFG_COMP(MT8183, ctx->param, subfrms[index].in.right);
	} else if (CFG_CHECK(MT8195, p_id)) {
		csf_l = CFG_COMP(MT8195, ctx->param, subfrms[index].in.left);
		csf_r = CFG_COMP(MT8195, ctx->param, subfrms[index].in.right);
	}
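	/*
	 * Disable dynamic clock management (DCM) while resizing tiles of
	 * 16 pixels or fewer; advance_rsz_subfrm() re-enables it once the
	 * subframe has been processed.
	 */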
	if (mdp_cfg && mdp_cfg->rsz_disable_dcm_small_sample)
		if ((csf_r - csf_l + 1) <= 16)
			MM_REG_WRITE_MASK(cmd, subsys_id, base, PRZ_CONTROL_1,
					  BIT(27), BIT(27));

	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, subfrms[index].luma.left);
	else if (CFG_CHECK(MT8195, p_id))
		reg = CFG_COMP(MT8195, ctx->param, subfrms[index].luma.left);
	MM_REG_WRITE_MASK(cmd, subsys_id, base, PRZ_LUMA_HORIZONTAL_INTEGER_OFFSET,
			  reg, 0xFFFF);

	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, subfrms[index].luma.left_subpix);
	else if (CFG_CHECK(MT8195, p_id))
		reg = CFG_COMP(MT8195, ctx->param, subfrms[index].luma.left_subpix);
	MM_REG_WRITE_MASK(cmd, subsys_id, base, PRZ_LUMA_HORIZONTAL_SUBPIXEL_OFFSET,
			  reg, 0x1FFFFF);

	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, subfrms[index].luma.top);
	else if (CFG_CHECK(MT8195, p_id))
		reg = CFG_COMP(MT8195, ctx->param, subfrms[index].luma.top);
	MM_REG_WRITE_MASK(cmd, subsys_id, base, PRZ_LUMA_VERTICAL_INTEGER_OFFSET,
			  reg, 0xFFFF);

	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, subfrms[index].luma.top_subpix);
	else if (CFG_CHECK(MT8195, p_id))
		reg = CFG_COMP(MT8195, ctx->param, subfrms[index].luma.top_subpix);
	MM_REG_WRITE_MASK(cmd, subsys_id, base, PRZ_LUMA_VERTICAL_SUBPIXEL_OFFSET,
			  reg, 0x1FFFFF);

	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, subfrms[index].chroma.left);
	else if (CFG_CHECK(MT8195, p_id))
		reg = CFG_COMP(MT8195, ctx->param, subfrms[index].chroma.left);
	MM_REG_WRITE_MASK(cmd, subsys_id, base, PRZ_CHROMA_HORIZONTAL_INTEGER_OFFSET,
			  reg, 0xFFFF);

	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, subfrms[index].chroma.left_subpix);
	else if (CFG_CHECK(MT8195, p_id))
		reg = CFG_COMP(MT8195, ctx->param, subfrms[index].chroma.left_subpix);
	MM_REG_WRITE_MASK(cmd, subsys_id, base, PRZ_CHROMA_HORIZONTAL_SUBPIXEL_OFFSET,
			  reg, 0x1FFFFF);

	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, rsz.subfrms[index].clip);
	else if (CFG_CHECK(MT8195, p_id))
		reg = CFG_COMP(MT8195, ctx->param, rsz.subfrms[index].clip);
	MM_REG_WRITE(cmd, subsys_id, base, PRZ_OUTPUT_IMAGE, reg);

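	/*
	 * On MT8195, RSZ2 and RSZ3 can hand their output to a paired MERGE
	 * engine; program the mmsys routing and run the MERGE in bypass mode.
	 */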
	if (CFG_CHECK(MT8195, p_id)) {
		struct device *dev;
		struct mdp_comp *merge;
		const struct mtk_mdp_driver_data *data = ctx->comp->mdp_dev->mdp_data;
		enum mtk_mdp_comp_id public_id = ctx->comp->public_id;

		switch (public_id) {
		case MDP_COMP_RSZ2:
			merge = ctx->comp->mdp_dev->comp[MDP_COMP_MERGE2];
			break;
		case MDP_COMP_RSZ3:
			merge = ctx->comp->mdp_dev->comp[MDP_COMP_MERGE3];
			break;
		default:
			goto rsz_subfrm_done;
		}

		if (CFG_CHECK(MT8195, p_id))
			reg = CFG_COMP(MT8195, ctx->param, rsz.subfrms[index].rsz_switch);

		id = data->comp_data[public_id].match.alias_id;
		dev = ctx->comp->mdp_dev->mm_subsys[MDP_MM_SUBSYS_1].mmsys;
		mtk_mmsys_vpp_rsz_merge_config(dev, id, reg, NULL);

		if (CFG_CHECK(MT8195, p_id))
			reg = CFG_COMP(MT8195, ctx->param, rsz.subfrms[index].merge_cfg);
		MM_REG_WRITE(cmd, merge->subsys_id, merge->reg_base,
			     MDP_MERGE_CFG_0, reg);
		MM_REG_WRITE(cmd, merge->subsys_id, merge->reg_base,
			     MDP_MERGE_CFG_4, reg);
		MM_REG_WRITE(cmd, merge->subsys_id, merge->reg_base,
			     MDP_MERGE_CFG_24, reg);
		MM_REG_WRITE(cmd, merge->subsys_id, merge->reg_base,
			     MDP_MERGE_CFG_25, reg);

		/* Bypass mode */
		MM_REG_WRITE(cmd, merge->subsys_id, merge->reg_base,
			     MDP_MERGE_CFG_12, BIT(0));
		MM_REG_WRITE(cmd, merge->subsys_id, merge->reg_base,
			     MDP_MERGE_ENABLE, BIT(0));
	}

rsz_subfrm_done:
	return 0;
}

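/*
 * Post-subframe step for RSZ: re-enable the DCM that config_rsz_subfrm()
 * turned off for small tiles.
 */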
static int advance_rsz_subfrm(struct mdp_comp_ctx *ctx,
			      struct mdp_cmdq_cmd *cmd, u32 index)
{
	const struct mdp_platform_config *mdp_cfg = __get_plat_cfg(ctx);

	if (mdp_cfg && mdp_cfg->rsz_disable_dcm_small_sample) {
		phys_addr_t base = ctx->comp->reg_base;
		u8 subsys_id = ctx->comp->subsys_id;
		u32 csf_l = 0, csf_r = 0;

		if (CFG_CHECK(MT8183, p_id)) {
			csf_l = CFG_COMP(MT8183, ctx->param, subfrms[index].in.left);
			csf_r = CFG_COMP(MT8183, ctx->param, subfrms[index].in.right);
		} else if (CFG_CHECK(MT8195, p_id)) {
			csf_l = CFG_COMP(MT8195, ctx->param, subfrms[index].in.left);
			csf_r = CFG_COMP(MT8195, ctx->param, subfrms[index].in.right);
		}

		if ((csf_r - csf_l + 1) <= 16)
			MM_REG_WRITE_MASK(cmd, subsys_id, base, PRZ_CONTROL_1, 0x0,
					  BIT(27));
	}

	return 0;
}

static const struct mdp_comp_ops rsz_ops = {
	.get_comp_flag = get_comp_flag,
	.init_comp = init_rsz,
	.config_frame = config_rsz_frame,
	.config_subfrm = config_rsz_subfrm,
	.advance_subfrm = advance_rsz_subfrm,
};

static int init_wrot(struct mdp_comp_ctx *ctx, struct mdp_cmdq_cmd *cmd)
{
	phys_addr_t base = ctx->comp->reg_base;
	u8 subsys_id = ctx->comp->subsys_id;

	/* Reset WROT */
	MM_REG_WRITE_MASK(cmd, subsys_id, base, VIDO_SOFT_RST, BIT(0), BIT(0));
	MM_REG_POLL_MASK(cmd, subsys_id, base, VIDO_SOFT_RST_STAT, BIT(0), BIT(0));

	/* Reset setting */
	if (CFG_CHECK(MT8195, p_id))
		MM_REG_WRITE(cmd, subsys_id, base, VIDO_CTRL, 0x0);

	MM_REG_WRITE_MASK(cmd, subsys_id, base, VIDO_SOFT_RST, 0x0, BIT(0));
	MM_REG_POLL_MASK(cmd, subsys_id, base, VIDO_SOFT_RST_STAT, 0x0, BIT(0));
	return 0;
}

static int config_wrot_frame(struct mdp_comp_ctx *ctx,
			     struct mdp_cmdq_cmd *cmd,
			     const struct v4l2_rect *compose)
{
	const struct mdp_platform_config *mdp_cfg = __get_plat_cfg(ctx);
	phys_addr_t base = ctx->comp->reg_base;
	u8 subsys_id = ctx->comp->subsys_id;
	u32 reg = 0;

	/* Write frame base address */
	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, wrot.iova[0]);
	else if (CFG_CHECK(MT8195, p_id))
		reg = CFG_COMP(MT8195, ctx->param, wrot.iova[0]);
	MM_REG_WRITE(cmd, subsys_id, base, VIDO_BASE_ADDR, reg);

	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, wrot.iova[1]);
	else if (CFG_CHECK(MT8195, p_id))
		reg = CFG_COMP(MT8195, ctx->param, wrot.iova[1]);
	MM_REG_WRITE(cmd, subsys_id, base, VIDO_BASE_ADDR_C, reg);

	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, wrot.iova[2]);
	else if (CFG_CHECK(MT8195, p_id))
		reg = CFG_COMP(MT8195, ctx->param, wrot.iova[2]);
	MM_REG_WRITE(cmd, subsys_id, base, VIDO_BASE_ADDR_V, reg);

	if (mdp_cfg && mdp_cfg->wrot_support_10bit) {
		if (CFG_CHECK(MT8195, p_id))
			reg = CFG_COMP(MT8195, ctx->param, wrot.scan_10bit);
		MM_REG_WRITE_MASK(cmd, subsys_id, base, VIDO_SCAN_10BIT,
				  reg, 0x0000000F);

		if (CFG_CHECK(MT8195, p_id))
			reg = CFG_COMP(MT8195, ctx->param, wrot.pending_zero);
		MM_REG_WRITE_MASK(cmd, subsys_id, base, VIDO_PENDING_ZERO,
				  reg, 0x04000000);
	}

	if (CFG_CHECK(MT8195, p_id)) {
		reg = CFG_COMP(MT8195, ctx->param, wrot.bit_number);
		MM_REG_WRITE_MASK(cmd, subsys_id, base, VIDO_CTRL_2,
				  reg, 0x00000007);
	}

	/* Write frame related registers */
	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, wrot.control);
	else if (CFG_CHECK(MT8195, p_id))
		reg = CFG_COMP(MT8195, ctx->param, wrot.control);
	MM_REG_WRITE_MASK(cmd, subsys_id, base, VIDO_CTRL, reg, 0xF131510F);

	/* Write pre-ultra threshold */
	if (CFG_CHECK(MT8195, p_id)) {
		reg = CFG_COMP(MT8195, ctx->param, wrot.pre_ultra);
		MM_REG_WRITE_MASK(cmd, subsys_id, base, VIDO_DMA_PREULTRA, reg,
				  0x00FFFFFF);
	}

	/* Write frame Y pitch */
	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, wrot.stride[0]);
	else if (CFG_CHECK(MT8195, p_id))
		reg = CFG_COMP(MT8195, ctx->param, wrot.stride[0]);
	MM_REG_WRITE_MASK(cmd, subsys_id, base, VIDO_STRIDE, reg, 0x0000FFFF);

	/* Write frame UV pitch */
	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, wrot.stride[1]);
	else if (CFG_CHECK(MT8195, p_id))
		reg = CFG_COMP(MT8195, ctx->param, wrot.stride[1]);
	MM_REG_WRITE_MASK(cmd, subsys_id, base, VIDO_STRIDE_C, reg, 0xFFFF);

	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, wrot.stride[2]);
	else if (CFG_CHECK(MT8195, p_id))
		reg = CFG_COMP(MT8195, ctx->param, wrot.stride[2]);
	MM_REG_WRITE_MASK(cmd, subsys_id, base, VIDO_STRIDE_V, reg, 0xFFFF);

	/* Write matrix control */
	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, wrot.mat_ctrl);
	else if (CFG_CHECK(MT8195, p_id))
		reg = CFG_COMP(MT8195, ctx->param, wrot.mat_ctrl);
	MM_REG_WRITE_MASK(cmd, subsys_id, base, VIDO_MAT_CTRL, reg, 0xF3);

	/* Set the fixed ALPHA as 0xFF */
	MM_REG_WRITE_MASK(cmd, subsys_id, base, VIDO_DITHER, 0xFF000000,
			  0xFF000000);

	/* Set VIDO_EOL_SEL */
	MM_REG_WRITE_MASK(cmd, subsys_id, base, VIDO_RSV_1, BIT(31), BIT(31));

	/* Set VIDO_FIFO_TEST */
	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, wrot.fifo_test);
	else if (CFG_CHECK(MT8195, p_id))
		reg = CFG_COMP(MT8195, ctx->param, wrot.fifo_test);

	if (reg != 0)
		MM_REG_WRITE_MASK(cmd, subsys_id, base, VIDO_FIFO_TEST, reg,
				  0xFFF);

	/* Filter enable */
	if (mdp_cfg && mdp_cfg->wrot_filter_constraint) {
		if (CFG_CHECK(MT8183, p_id))
			reg = CFG_COMP(MT8183, ctx->param, wrot.filter);
		else if (CFG_CHECK(MT8195, p_id))
			reg = CFG_COMP(MT8195, ctx->param, wrot.filter);
		MM_REG_WRITE_MASK(cmd, subsys_id, base, VIDO_MAIN_BUF_SIZE, reg,
				  0x77);

		/* Turn off WROT DMA DCM */
		if (CFG_CHECK(MT8195, p_id))
			MM_REG_WRITE_MASK(cmd, subsys_id, base, VIDO_ROT_EN,
					  (0x1 << 23) + (0x1 << 20), 0x900000);
	}

	return 0;
}

static int config_wrot_subfrm(struct mdp_comp_ctx *ctx,
			      struct mdp_cmdq_cmd *cmd, u32 index)
{
	phys_addr_t base = ctx->comp->reg_base;
	u8 subsys_id = ctx->comp->subsys_id;
	u32 reg = 0;

	/* Write Y pixel offset */
	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, wrot.subfrms[index].offset[0]);
	else if (CFG_CHECK(MT8195, p_id))
		reg = CFG_COMP(MT8195, ctx->param, wrot.subfrms[index].offset[0]);
	MM_REG_WRITE_MASK(cmd, subsys_id, base, VIDO_OFST_ADDR, reg, 0x0FFFFFFF);

	/* Write U pixel offset */
	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, wrot.subfrms[index].offset[1]);
	else if (CFG_CHECK(MT8195, p_id))
		reg = CFG_COMP(MT8195, ctx->param, wrot.subfrms[index].offset[1]);
	MM_REG_WRITE_MASK(cmd, subsys_id, base, VIDO_OFST_ADDR_C, reg, 0x0FFFFFFF);

	/* Write V pixel offset */
	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, wrot.subfrms[index].offset[2]);
	else if (CFG_CHECK(MT8195, p_id))
		reg = CFG_COMP(MT8195, ctx->param, wrot.subfrms[index].offset[2]);
	MM_REG_WRITE_MASK(cmd, subsys_id, base, VIDO_OFST_ADDR_V, reg,
			  0x0FFFFFFF);

	/* Write source size */
	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, wrot.subfrms[index].src);
	else if (CFG_CHECK(MT8195, p_id))
		reg = CFG_COMP(MT8195, ctx->param, wrot.subfrms[index].src);
	MM_REG_WRITE_MASK(cmd, subsys_id, base, VIDO_IN_SIZE, reg, 0x1FFF1FFF);

	/* Write target size */
	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, wrot.subfrms[index].clip);
	else if (CFG_CHECK(MT8195, p_id))
		reg = CFG_COMP(MT8195, ctx->param, wrot.subfrms[index].clip);
	MM_REG_WRITE_MASK(cmd, subsys_id, base, VIDO_TAR_SIZE, reg, 0x1FFF1FFF);

	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, wrot.subfrms[index].clip_ofst);
	else if (CFG_CHECK(MT8195, p_id))
		reg = CFG_COMP(MT8195, ctx->param, wrot.subfrms[index].clip_ofst);
	MM_REG_WRITE_MASK(cmd, subsys_id, base, VIDO_CROP_OFST, reg, 0x1FFF1FFF);

	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, wrot.subfrms[index].main_buf);
	else if (CFG_CHECK(MT8195, p_id))
		reg = CFG_COMP(MT8195, ctx->param, wrot.subfrms[index].main_buf);
	MM_REG_WRITE_MASK(cmd, subsys_id, base, VIDO_MAIN_BUF_SIZE, reg,
			  0x1FFF7F00);

	/* Enable WROT */
	MM_REG_WRITE_MASK(cmd, subsys_id, base, VIDO_ROT_EN, BIT(0), BIT(0));

	return 0;
}

static int wait_wrot_event(struct mdp_comp_ctx *ctx, struct mdp_cmdq_cmd *cmd)
{
	const struct mdp_platform_config *mdp_cfg = __get_plat_cfg(ctx);
	struct device *dev = &ctx->comp->mdp_dev->pdev->dev;
	phys_addr_t base = ctx->comp->reg_base;
	u8 subsys_id = ctx->comp->subsys_id;

	if (!mdp_cfg)
		return -EINVAL;

	if (ctx->comp->alias_id >= mdp_cfg->wrot_event_num) {
		dev_err(dev, "Invalid WROT event %d!\n", ctx->comp->alias_id);
		return -EINVAL;
	}

	MM_REG_WAIT(cmd, ctx->comp->gce_event[MDP_GCE_EVENT_EOF]);

	if (mdp_cfg && mdp_cfg->wrot_filter_constraint)
		MM_REG_WRITE_MASK(cmd, subsys_id, base, VIDO_MAIN_BUF_SIZE, 0x0,
				  0x77);

	/* Disable WROT */
	MM_REG_WRITE_MASK(cmd, subsys_id, base, VIDO_ROT_EN, 0x0, BIT(0));

	return 0;
}

static const struct mdp_comp_ops wrot_ops = {
	.get_comp_flag = get_comp_flag,
	.init_comp = init_wrot,
	.config_frame = config_wrot_frame,
	.config_subfrm = config_wrot_subfrm,
	.wait_comp_event = wait_wrot_event,
};

static int init_wdma(struct mdp_comp_ctx *ctx, struct mdp_cmdq_cmd *cmd)
{
	phys_addr_t base = ctx->comp->reg_base;
	u8 subsys_id = ctx->comp->subsys_id;

	/* Reset WDMA */
	MM_REG_WRITE_MASK(cmd, subsys_id, base, WDMA_RST, BIT(0), BIT(0));
	MM_REG_POLL_MASK(cmd, subsys_id, base, WDMA_FLOW_CTRL_DBG, BIT(0), BIT(0));
	MM_REG_WRITE_MASK(cmd, subsys_id, base, WDMA_RST, 0x0, BIT(0));
	return 0;
}

static int config_wdma_frame(struct mdp_comp_ctx *ctx,
			     struct mdp_cmdq_cmd *cmd,
			     const struct v4l2_rect *compose)
{
	phys_addr_t base = ctx->comp->reg_base;
	u8 subsys_id = ctx->comp->subsys_id;
	u32 reg = 0;

	MM_REG_WRITE(cmd, subsys_id, base, WDMA_BUF_CON2, 0x10101050);

	/* Setup frame information */
	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, wdma.wdma_cfg);
	MM_REG_WRITE_MASK(cmd, subsys_id, base, WDMA_CFG, reg, 0x0F01B8F0);
	/* Setup frame base address */
	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, wdma.iova[0]);
	MM_REG_WRITE(cmd, subsys_id, base, WDMA_DST_ADDR, reg);
	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, wdma.iova[1]);
	MM_REG_WRITE(cmd, subsys_id, base, WDMA_DST_U_ADDR, reg);
	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, wdma.iova[2]);
	MM_REG_WRITE(cmd, subsys_id, base, WDMA_DST_V_ADDR, reg);
	/* Setup Y pitch */
	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, wdma.w_in_byte);
	MM_REG_WRITE_MASK(cmd, subsys_id, base, WDMA_DST_W_IN_BYTE, reg,
			  0x0000FFFF);
	/* Setup UV pitch */
	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, wdma.uv_stride);
	MM_REG_WRITE_MASK(cmd, subsys_id, base, WDMA_DST_UV_PITCH, reg,
			  0x0000FFFF);
	/* Set the fixed ALPHA as 0xFF */
	MM_REG_WRITE_MASK(cmd, subsys_id, base, WDMA_ALPHA, 0x800000FF,
			  0x800000FF);

	return 0;
}

static int config_wdma_subfrm(struct mdp_comp_ctx *ctx,
			      struct mdp_cmdq_cmd *cmd, u32 index)
{
	phys_addr_t base = ctx->comp->reg_base;
	u8 subsys_id = ctx->comp->subsys_id;
	u32 reg = 0;

	/* Write Y pixel offset */
	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, wdma.subfrms[index].offset[0]);
	MM_REG_WRITE_MASK(cmd, subsys_id, base, WDMA_DST_ADDR_OFFSET, reg,
			  0x0FFFFFFF);
	/* Write U pixel offset */
	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, wdma.subfrms[index].offset[1]);
	MM_REG_WRITE_MASK(cmd, subsys_id, base, WDMA_DST_U_ADDR_OFFSET, reg,
			  0x0FFFFFFF);
	/* Write V pixel offset */
	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, wdma.subfrms[index].offset[2]);
	MM_REG_WRITE_MASK(cmd, subsys_id, base, WDMA_DST_V_ADDR_OFFSET, reg,
			  0x0FFFFFFF);
	/* Write source size */
	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, wdma.subfrms[index].src);
	MM_REG_WRITE_MASK(cmd, subsys_id, base, WDMA_SRC_SIZE, reg, 0x3FFF3FFF);
	/* Write target size */
	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, wdma.subfrms[index].clip);
	MM_REG_WRITE_MASK(cmd, subsys_id, base, WDMA_CLIP_SIZE, reg, 0x3FFF3FFF);
	/* Write clip offset */
	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, wdma.subfrms[index].clip_ofst);
	MM_REG_WRITE_MASK(cmd, subsys_id, base, WDMA_CLIP_COORD, reg, 0x3FFF3FFF);

	/* Enable WDMA */
	MM_REG_WRITE_MASK(cmd, subsys_id, base, WDMA_EN, BIT(0), BIT(0));

	return 0;
}

static int wait_wdma_event(struct mdp_comp_ctx *ctx, struct mdp_cmdq_cmd *cmd)
{
	phys_addr_t base = ctx->comp->reg_base;
	u8 subsys_id = ctx->comp->subsys_id;

	MM_REG_WAIT(cmd, ctx->comp->gce_event[MDP_GCE_EVENT_EOF]);
	/* Disable WDMA */
	MM_REG_WRITE_MASK(cmd, subsys_id, base, WDMA_EN, 0x0, BIT(0));
	return 0;
}

static const struct mdp_comp_ops wdma_ops = {
	.get_comp_flag = get_comp_flag,
	.init_comp = init_wdma,
	.config_frame = config_wdma_frame,
	.config_subfrm = config_wdma_subfrm,
	.wait_comp_event = wait_wdma_event,
};

static int reset_luma_hist(struct mdp_comp_ctx *ctx, struct mdp_cmdq_cmd *cmd)
{
	const struct mdp_platform_config *mdp_cfg = __get_plat_cfg(ctx);
	phys_addr_t base = ctx->comp->reg_base;
	u16 subsys_id = ctx->comp->subsys_id;
	u32 hist_num, i;

	if (!mdp_cfg)
		return -EINVAL;

	hist_num = mdp_cfg->tdshp_hist_num;

	/* Reset histogram */
	for (i = 0; i <= hist_num; i++)
		MM_REG_WRITE(cmd, subsys_id, base,
			     (MDP_LUMA_HIST_INIT + (i << 2)), 0);

	if (mdp_cfg->tdshp_constrain)
		MM_REG_WRITE(cmd, subsys_id, base,
			     MDP_DC_TWO_D_W1_RESULT_INIT, 0);

	if (mdp_cfg->tdshp_contour)
		for (i = 0; i < hist_num; i++)
			MM_REG_WRITE(cmd, subsys_id, base,
				     (MDP_CONTOUR_HIST_INIT + (i << 2)), 0);

	return 0;
}

static int init_tdshp(struct mdp_comp_ctx *ctx, struct mdp_cmdq_cmd *cmd)
{
	phys_addr_t base = ctx->comp->reg_base;
	u16 subsys_id = ctx->comp->subsys_id;

	MM_REG_WRITE_MASK(cmd, subsys_id, base, MDP_TDSHP_CTRL, BIT(0), BIT(0));
	/* Enable FIFO */
	MM_REG_WRITE_MASK(cmd, subsys_id, base, MDP_TDSHP_CFG, BIT(1), BIT(1));

	return reset_luma_hist(ctx, cmd);
}

static int config_tdshp_frame(struct mdp_comp_ctx *ctx,
			      struct mdp_cmdq_cmd *cmd,
			      const struct v4l2_rect *compose)
{
	phys_addr_t base = ctx->comp->reg_base;
	u16 subsys_id = ctx->comp->subsys_id;
	u32 reg = 0;

	if (CFG_CHECK(MT8195, p_id))
		reg = CFG_COMP(MT8195, ctx->param, tdshp.cfg);
	MM_REG_WRITE_MASK(cmd, subsys_id, base, MDP_TDSHP_CFG, reg, BIT(0));

	return 0;
}

static int config_tdshp_subfrm(struct mdp_comp_ctx *ctx,
			       struct mdp_cmdq_cmd *cmd, u32 index)
{
	phys_addr_t base = ctx->comp->reg_base;
	u16 subsys_id = ctx->comp->subsys_id;
	u32 reg = 0;

	if (CFG_CHECK(MT8195, p_id))
		reg = CFG_COMP(MT8195, ctx->param, tdshp.subfrms[index].src);
	MM_REG_WRITE(cmd, subsys_id, base, MDP_TDSHP_INPUT_SIZE, reg);

	if (CFG_CHECK(MT8195, p_id))
		reg = CFG_COMP(MT8195, ctx->param, tdshp.subfrms[index].clip_ofst);
	MM_REG_WRITE_MASK(cmd, subsys_id, base, MDP_TDSHP_OUTPUT_OFFSET, reg,
			  0x00FF00FF);

	if (CFG_CHECK(MT8195, p_id))
		reg = CFG_COMP(MT8195, ctx->param, tdshp.subfrms[index].clip);
	MM_REG_WRITE(cmd, subsys_id, base, MDP_TDSHP_OUTPUT_SIZE, reg);

	if (CFG_CHECK(MT8195, p_id))
		reg = CFG_COMP(MT8195, ctx->param, tdshp.subfrms[index].hist_cfg_0);
	MM_REG_WRITE(cmd, subsys_id, base, MDP_HIST_CFG_00, reg);

	if (CFG_CHECK(MT8195, p_id))
		reg = CFG_COMP(MT8195, ctx->param, tdshp.subfrms[index].hist_cfg_1);
	MM_REG_WRITE(cmd, subsys_id, base, MDP_HIST_CFG_01, reg);

	return 0;
}

static const struct mdp_comp_ops tdshp_ops = {
	.get_comp_flag = get_comp_flag,
	.init_comp = init_tdshp,
	.config_frame = config_tdshp_frame,
	.config_subfrm = config_tdshp_subfrm,
};

static int init_color(struct mdp_comp_ctx *ctx, struct mdp_cmdq_cmd *cmd)
{
	phys_addr_t base = ctx->comp->reg_base;
	u16 subsys_id = ctx->comp->subsys_id;

	MM_REG_WRITE_MASK(cmd, subsys_id, base, MDP_COLOR_START, 0x1,
			  BIT(1) | BIT(0));
	MM_REG_WRITE(cmd, subsys_id, base, MDP_COLOR_WIN_X_MAIN, 0xFFFF0000);
	MM_REG_WRITE(cmd, subsys_id, base, MDP_COLOR_WIN_Y_MAIN, 0xFFFF0000);

	/* Reset color matrix */
	MM_REG_WRITE_MASK(cmd, subsys_id, base, MDP_COLOR_CM1_EN, 0x0, BIT(0));
	MM_REG_WRITE_MASK(cmd, subsys_id, base, MDP_COLOR_CM2_EN, 0x0, BIT(0));

	/* Enable interrupt */
	MM_REG_WRITE_MASK(cmd, subsys_id, base, MDP_COLOR_INTEN, 0x7, 0x7);

	MM_REG_WRITE_MASK(cmd, subsys_id, base, MDP_COLOR_OUT_SEL, 0x333, 0x333);

	return 0;
}

static int config_color_frame(struct mdp_comp_ctx *ctx,
			      struct mdp_cmdq_cmd *cmd,
			      const struct v4l2_rect *compose)
{
	phys_addr_t base = ctx->comp->reg_base;
	u16 subsys_id = ctx->comp->subsys_id;
	u32 reg = 0;

	if (CFG_CHECK(MT8195, p_id))
		reg = CFG_COMP(MT8195, ctx->param, color.start);
	MM_REG_WRITE(cmd, subsys_id, base, MDP_COLOR_START, reg);

	return 0;
}

static int config_color_subfrm(struct mdp_comp_ctx *ctx,
			       struct mdp_cmdq_cmd *cmd, u32 index)
{
	phys_addr_t base = ctx->comp->reg_base;
	u16 subsys_id = ctx->comp->subsys_id;
	u32 reg = 0;

	if (CFG_CHECK(MT8195, p_id))
		reg = CFG_COMP(MT8195, ctx->param, color.subfrms[index].in_hsize);
	MM_REG_WRITE_MASK(cmd, subsys_id, base, MDP_COLOR_INTERNAL_IP_WIDTH,
			  reg, 0x00003FFF);

	if (CFG_CHECK(MT8195, p_id))
		reg = CFG_COMP(MT8195, ctx->param, color.subfrms[index].in_vsize);
	MM_REG_WRITE_MASK(cmd, subsys_id, base, MDP_COLOR_INTERNAL_IP_HEIGHT,
			  reg, 0x00003FFF);

	return 0;
}

static const struct mdp_comp_ops color_ops = {
	.get_comp_flag = get_comp_flag,
	.init_comp = init_color,
	.config_frame = config_color_frame,
	.config_subfrm = config_color_subfrm,
};

static int init_ccorr(struct mdp_comp_ctx *ctx, struct mdp_cmdq_cmd *cmd)
{
	phys_addr_t base = ctx->comp->reg_base;
	u8 subsys_id = ctx->comp->subsys_id;

	/* CCORR enable */
	MM_REG_WRITE_MASK(cmd, subsys_id, base, MDP_CCORR_EN, BIT(0), BIT(0));
	/* Relay mode */
	MM_REG_WRITE_MASK(cmd, subsys_id, base, MDP_CCORR_CFG, BIT(0), BIT(0));
	return 0;
}

static int config_ccorr_subfrm(struct mdp_comp_ctx *ctx,
			       struct mdp_cmdq_cmd *cmd, u32 index)
{
	phys_addr_t base = ctx->comp->reg_base;
	u8 subsys_id = ctx->comp->subsys_id;
	u32 csf_l = 0, csf_r = 0;
	u32 csf_t = 0, csf_b = 0;
	u32 hsize, vsize;

	if (CFG_CHECK(MT8183, p_id)) {
		csf_l = CFG_COMP(MT8183, ctx->param, subfrms[index].in.left);
		csf_r = CFG_COMP(MT8183, ctx->param, subfrms[index].in.right);
		csf_t = CFG_COMP(MT8183, ctx->param, subfrms[index].in.top);
		csf_b = CFG_COMP(MT8183, ctx->param, subfrms[index].in.bottom);
	}

	hsize = csf_r - csf_l + 1;
	vsize = csf_b - csf_t + 1;
	MM_REG_WRITE_MASK(cmd, subsys_id, base, MDP_CCORR_SIZE,
			  (hsize << 16) + (vsize << 0), 0x1FFF1FFF);
	return 0;
}

static const struct mdp_comp_ops ccorr_ops = {
	.get_comp_flag = get_comp_flag,
	.init_comp = init_ccorr,
	.config_subfrm = config_ccorr_subfrm,
};

static int init_aal(struct mdp_comp_ctx *ctx, struct mdp_cmdq_cmd *cmd)
{
	phys_addr_t base = ctx->comp->reg_base;
	u16 subsys_id = ctx->comp->subsys_id;

	/* Always set MDP_AAL enable to 1 */
	MM_REG_WRITE_MASK(cmd, subsys_id, base, MDP_AAL_EN, BIT(0), BIT(0));

	return 0;
}

static int config_aal_frame(struct mdp_comp_ctx *ctx,
			    struct mdp_cmdq_cmd *cmd,
			    const struct v4l2_rect *compose)
{
	phys_addr_t base = ctx->comp->reg_base;
	u16 subsys_id = ctx->comp->subsys_id;
	u32 reg = 0;

	if (CFG_CHECK(MT8195, p_id))
		reg = CFG_COMP(MT8195, ctx->param, aal.cfg_main);
	MM_REG_WRITE_MASK(cmd, subsys_id, base, MDP_AAL_CFG_MAIN, reg, BIT(7));

	if (CFG_CHECK(MT8195, p_id))
		reg = CFG_COMP(MT8195, ctx->param, aal.cfg);
	MM_REG_WRITE_MASK(cmd, subsys_id, base, MDP_AAL_CFG, reg, BIT(0));

	return 0;
}

static int config_aal_subfrm(struct mdp_comp_ctx *ctx,
			     struct mdp_cmdq_cmd *cmd, u32 index)
{
	phys_addr_t base = ctx->comp->reg_base;
	u16 subsys_id = ctx->comp->subsys_id;
	u32 reg = 0;

	if (CFG_CHECK(MT8195, p_id))
		reg = CFG_COMP(MT8195, ctx->param, aal.subfrms[index].src);
	MM_REG_WRITE(cmd, subsys_id, base, MDP_AAL_SIZE, reg);

	if (CFG_CHECK(MT8195, p_id))
		reg = CFG_COMP(MT8195, ctx->param, aal.subfrms[index].clip_ofst);
	MM_REG_WRITE_MASK(cmd, subsys_id, base, MDP_AAL_OUTPUT_OFFSET, reg,
			  0x00FF00FF);

	if (CFG_CHECK(MT8195, p_id))
		reg = CFG_COMP(MT8195, ctx->param, aal.subfrms[index].clip);
	MM_REG_WRITE(cmd, subsys_id, base, MDP_AAL_OUTPUT_SIZE, reg);

	return 0;
}

static const struct mdp_comp_ops aal_ops = {
	.get_comp_flag = get_comp_flag,
	.init_comp = init_aal,
	.config_frame = config_aal_frame,
	.config_subfrm = config_aal_subfrm,
};

static int init_hdr(struct mdp_comp_ctx *ctx, struct mdp_cmdq_cmd *cmd)
{
	phys_addr_t base = ctx->comp->reg_base;
	u16 subsys_id = ctx->comp->subsys_id;

	/* Always set MDP_HDR enable to 1 */
	MM_REG_WRITE_MASK(cmd, subsys_id, base, MDP_HDR_TOP, BIT(0), BIT(0));

	return 0;
}

static int config_hdr_frame(struct mdp_comp_ctx *ctx,
			    struct mdp_cmdq_cmd *cmd,
			    const struct v4l2_rect *compose)
{
	phys_addr_t base = ctx->comp->reg_base;
	u16 subsys_id = ctx->comp->subsys_id;
	u32 reg = 0;

	if (CFG_CHECK(MT8195, p_id))
		reg = CFG_COMP(MT8195, ctx->param, hdr.top);
	MM_REG_WRITE_MASK(cmd, subsys_id, base, MDP_HDR_TOP, reg, BIT(29) | BIT(28));

	if (CFG_CHECK(MT8195, p_id))
		reg = CFG_COMP(MT8195, ctx->param, hdr.relay);
	MM_REG_WRITE_MASK(cmd, subsys_id, base, MDP_HDR_RELAY, reg, BIT(0));

	return 0;
}

static int config_hdr_subfrm(struct mdp_comp_ctx *ctx,
			     struct mdp_cmdq_cmd *cmd, u32 index)
{
	phys_addr_t base = ctx->comp->reg_base;
	u16 subsys_id = ctx->comp->subsys_id;
	u32 reg = 0;

	if (CFG_CHECK(MT8195, p_id))
		reg = CFG_COMP(MT8195, ctx->param, hdr.subfrms[index].win_size);
	MM_REG_WRITE(cmd, subsys_id, base, MDP_HDR_TILE_POS, reg);

	if (CFG_CHECK(MT8195, p_id))
		reg = CFG_COMP(MT8195, ctx->param, hdr.subfrms[index].src);
	MM_REG_WRITE_MASK(cmd, subsys_id, base, MDP_HDR_SIZE_0, reg, 0x1FFF1FFF);

	if (CFG_CHECK(MT8195, p_id))
		reg = CFG_COMP(MT8195, ctx->param, hdr.subfrms[index].clip_ofst0);
	MM_REG_WRITE_MASK(cmd, subsys_id, base, MDP_HDR_SIZE_1, reg, 0x1FFF1FFF);

	if (CFG_CHECK(MT8195, p_id))
		reg = CFG_COMP(MT8195, ctx->param, hdr.subfrms[index].clip_ofst1);
	MM_REG_WRITE_MASK(cmd, subsys_id, base, MDP_HDR_SIZE_2, reg, 0x1FFF1FFF);

	if (CFG_CHECK(MT8195, p_id))
		reg = CFG_COMP(MT8195, ctx->param, hdr.subfrms[index].hist_ctrl_0);
	MM_REG_WRITE_MASK(cmd, subsys_id, base, MDP_HDR_HIST_CTRL_0, reg, 0x00003FFF);

	if (CFG_CHECK(MT8195, p_id))
		reg = CFG_COMP(MT8195, ctx->param, hdr.subfrms[index].hist_ctrl_1);
	MM_REG_WRITE_MASK(cmd, subsys_id, base, MDP_HDR_HIST_CTRL_1, reg, 0x00003FFF);

	if (CFG_CHECK(MT8195, p_id))
		reg = CFG_COMP(MT8195, ctx->param, hdr.subfrms[index].hdr_top);
	MM_REG_WRITE_MASK(cmd, subsys_id, base, MDP_HDR_TOP, reg, BIT(6) | BIT(5));

	/* Enable histogram */
	if (CFG_CHECK(MT8195, p_id))
		reg = CFG_COMP(MT8195, ctx->param, hdr.subfrms[index].hist_addr);
	MM_REG_WRITE_MASK(cmd, subsys_id, base, MDP_HDR_HIST_ADDR, reg, BIT(9));

	return 0;
}

static const struct mdp_comp_ops hdr_ops = {
	.get_comp_flag = get_comp_flag,
	.init_comp = init_hdr,
	.config_frame = config_hdr_frame,
	.config_subfrm = config_hdr_subfrm,
};

static int init_fg(struct mdp_comp_ctx *ctx, struct mdp_cmdq_cmd *cmd)
{
	phys_addr_t base = ctx->comp->reg_base;
	u16 subsys_id = ctx->comp->subsys_id;

	MM_REG_WRITE_MASK(cmd, subsys_id, base, MDP_FG_TRIGGER, BIT(2), BIT(2));
	MM_REG_WRITE_MASK(cmd, subsys_id, base, MDP_FG_TRIGGER, 0x0, BIT(2));

	return 0;
}

static int config_fg_frame(struct mdp_comp_ctx *ctx,
			   struct mdp_cmdq_cmd *cmd,
			   const struct v4l2_rect *compose)
{
	phys_addr_t base = ctx->comp->reg_base;
	u16 subsys_id = ctx->comp->subsys_id;
	u32 reg = 0;

	if (CFG_CHECK(MT8195, p_id))
		reg = CFG_COMP(MT8195, ctx->param, fg.ctrl_0);
	MM_REG_WRITE_MASK(cmd, subsys_id, base, MDP_FG_FG_CTRL_0, reg, BIT(0));

	if (CFG_CHECK(MT8195, p_id))
		reg = CFG_COMP(MT8195, ctx->param, fg.ck_en);
	MM_REG_WRITE_MASK(cmd, subsys_id, base, MDP_FG_FG_CK_EN, reg, 0x7);

	return 0;
}

static int config_fg_subfrm(struct mdp_comp_ctx *ctx,
			    struct mdp_cmdq_cmd *cmd, u32 index)
{
	phys_addr_t base = ctx->comp->reg_base;
	u16 subsys_id = ctx->comp->subsys_id;
	u32 reg = 0;

	if (CFG_CHECK(MT8195, p_id))
		reg = CFG_COMP(MT8195, ctx->param, fg.subfrms[index].info_0);
	MM_REG_WRITE(cmd, subsys_id, base, MDP_FG_TILE_INFO_0, reg);

	if (CFG_CHECK(MT8195, p_id))
		reg = CFG_COMP(MT8195, ctx->param, fg.subfrms[index].info_1);
	MM_REG_WRITE(cmd, subsys_id, base, MDP_FG_TILE_INFO_1, reg);

	return 0;
}

static const struct mdp_comp_ops fg_ops = {
	.get_comp_flag = get_comp_flag,
	.init_comp = init_fg,
	.config_frame = config_fg_frame,
	.config_subfrm = config_fg_subfrm,
};

static int init_ovl(struct mdp_comp_ctx *ctx, struct mdp_cmdq_cmd *cmd)
{
	phys_addr_t base = ctx->comp->reg_base;
	u16 subsys_id = ctx->comp->subsys_id;

	MM_REG_WRITE(cmd, subsys_id, base, MDP_OVL_EN, BIT(0));

	/* Set to relay mode */
	MM_REG_WRITE(cmd, subsys_id, base, MDP_OVL_SRC_CON, BIT(9));
	MM_REG_WRITE(cmd, subsys_id, base, MDP_OVL_DP_CON, BIT(0));

	return 0;
}

static int config_ovl_frame(struct mdp_comp_ctx *ctx,
			    struct mdp_cmdq_cmd *cmd,
			    const struct v4l2_rect *compose)
{
	phys_addr_t base = ctx->comp->reg_base;
	u16 subsys_id = ctx->comp->subsys_id;
	u32 reg = 0;

	if (CFG_CHECK(MT8195, p_id))
		reg = CFG_COMP(MT8195, ctx->param, ovl.L0_con);
	MM_REG_WRITE_MASK(cmd, subsys_id, base, MDP_OVL_L0_CON, reg, BIT(29) | BIT(28));

	if (CFG_CHECK(MT8195, p_id))
		reg = CFG_COMP(MT8195, ctx->param, ovl.src_con);
	MM_REG_WRITE_MASK(cmd, subsys_id, base, MDP_OVL_SRC_CON, reg, BIT(0));

	return 0;
}

static int config_ovl_subfrm(struct mdp_comp_ctx *ctx,
			     struct mdp_cmdq_cmd *cmd, u32 index)
{
	phys_addr_t base = ctx->comp->reg_base;
	u16 subsys_id = ctx->comp->subsys_id;
	u32 reg = 0;

	if (CFG_CHECK(MT8195, p_id))
		reg = CFG_COMP(MT8195, ctx->param, ovl.subfrms[index].L0_src_size);
	MM_REG_WRITE(cmd, subsys_id, base, MDP_OVL_L0_SRC_SIZE, reg);

	/* Setup output size */
	if (CFG_CHECK(MT8195, p_id))
		reg = CFG_COMP(MT8195, ctx->param, ovl.subfrms[index].roi_size);
	MM_REG_WRITE(cmd, subsys_id, base, MDP_OVL_ROI_SIZE, reg);

	return 0;
}

static const struct mdp_comp_ops ovl_ops = {
	.get_comp_flag = get_comp_flag,
	.init_comp = init_ovl,
	.config_frame = config_ovl_frame,
	.config_subfrm = config_ovl_subfrm,
};

static int init_pad(struct mdp_comp_ctx *ctx, struct mdp_cmdq_cmd *cmd)
{
	phys_addr_t base = ctx->comp->reg_base;
	u16 subsys_id = ctx->comp->subsys_id;

	MM_REG_WRITE(cmd, subsys_id, base, MDP_PAD_CON, BIT(1));
	/* Reset */
	MM_REG_WRITE(cmd, subsys_id, base, MDP_PAD_W_SIZE, 0);
	MM_REG_WRITE(cmd, subsys_id, base, MDP_PAD_H_SIZE, 0);

	return 0;
}

static int config_pad_subfrm(struct mdp_comp_ctx *ctx,
			     struct mdp_cmdq_cmd *cmd, u32 index)
{
	phys_addr_t base = ctx->comp->reg_base;
	u16 subsys_id = ctx->comp->subsys_id;
	u32 reg = 0;

	if (CFG_CHECK(MT8195, p_id))
		reg = CFG_COMP(MT8195, ctx->param, pad.subfrms[index].pic_size);
	MM_REG_WRITE(cmd, subsys_id, base, MDP_PAD_PIC_SIZE, reg);

	return 0;
}

static const struct mdp_comp_ops pad_ops = {
	.get_comp_flag = get_comp_flag,
	.init_comp = init_pad,
	.config_subfrm = config_pad_subfrm,
};

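/* Operation tables for each component type, indexed by enum mdp_comp_type */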
static const struct mdp_comp_ops *mdp_comp_ops[MDP_COMP_TYPE_COUNT] = {
	[MDP_COMP_TYPE_RDMA] = &rdma_ops,
	[MDP_COMP_TYPE_RSZ] = &rsz_ops,
	[MDP_COMP_TYPE_WROT] = &wrot_ops,
	[MDP_COMP_TYPE_WDMA] = &wdma_ops,
	[MDP_COMP_TYPE_TDSHP] = &tdshp_ops,
	[MDP_COMP_TYPE_COLOR] = &color_ops,
	[MDP_COMP_TYPE_CCORR] = &ccorr_ops,
	[MDP_COMP_TYPE_AAL] = &aal_ops,
	[MDP_COMP_TYPE_HDR] = &hdr_ops,
	[MDP_COMP_TYPE_FG] = &fg_ops,
	[MDP_COMP_TYPE_OVL] = &ovl_ops,
	[MDP_COMP_TYPE_PAD] = &pad_ops,
};

static const struct of_device_id mdp_comp_dt_ids[] __maybe_unused = {
	{
		.compatible = "mediatek,mt8183-mdp3-rdma",
		.data = (void *)MDP_COMP_TYPE_RDMA,
	}, {
		.compatible = "mediatek,mt8183-mdp3-ccorr",
		.data = (void *)MDP_COMP_TYPE_CCORR,
	}, {
		.compatible = "mediatek,mt8183-mdp3-rsz",
		.data = (void *)MDP_COMP_TYPE_RSZ,
	}, {
		.compatible = "mediatek,mt8183-mdp3-wrot",
		.data = (void *)MDP_COMP_TYPE_WROT,
	}, {
		.compatible = "mediatek,mt8183-mdp3-wdma",
		.data = (void *)MDP_COMP_TYPE_WDMA,
	}, {
		.compatible = "mediatek,mt8195-mdp3-rdma",
		.data = (void *)MDP_COMP_TYPE_RDMA,
	}, {
		.compatible = "mediatek,mt8195-mdp3-split",
		.data = (void *)MDP_COMP_TYPE_SPLIT,
	}, {
		.compatible = "mediatek,mt8195-mdp3-stitch",
		.data = (void *)MDP_COMP_TYPE_STITCH,
	}, {
		.compatible = "mediatek,mt8195-mdp3-fg",
		.data = (void *)MDP_COMP_TYPE_FG,
	}, {
		.compatible = "mediatek,mt8195-mdp3-hdr",
		.data = (void *)MDP_COMP_TYPE_HDR,
	}, {
		.compatible = "mediatek,mt8195-mdp3-aal",
		.data = (void *)MDP_COMP_TYPE_AAL,
	}, {
		.compatible = "mediatek,mt8195-mdp3-merge",
		.data = (void *)MDP_COMP_TYPE_MERGE,
	}, {
		.compatible = "mediatek,mt8195-mdp3-tdshp",
		.data = (void *)MDP_COMP_TYPE_TDSHP,
	}, {
		.compatible = "mediatek,mt8195-mdp3-color",
		.data = (void *)MDP_COMP_TYPE_COLOR,
	}, {
		.compatible = "mediatek,mt8195-mdp3-ovl",
		.data = (void *)MDP_COMP_TYPE_OVL,
	}, {
		.compatible = "mediatek,mt8195-mdp3-padding",
		.data = (void *)MDP_COMP_TYPE_PAD,
	}, {
		.compatible = "mediatek,mt8195-mdp3-tcc",
		.data = (void *)MDP_COMP_TYPE_TCC,
	},
	{}
};

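/* RDMA, WROT and WDMA are the components that move frame data through DMA */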
static inline bool is_dma_capable(const enum mdp_comp_type type)
{
	return (type == MDP_COMP_TYPE_RDMA ||
		type == MDP_COMP_TYPE_WROT ||
		type == MDP_COMP_TYPE_WDMA);
}

static inline bool is_bypass_gce_event(const enum mdp_comp_type type)
{
	/*
	 * Subcomponent PATH is only used for the direction of data flow and
	 * does not need to wait for a GCE event.
	 */
	return (type == MDP_COMP_TYPE_PATH);
}

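/* Look up the index of a (type, alias_id) pair in the platform comp_data */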
static int mdp_comp_get_id(struct mdp_dev *mdp, enum mdp_comp_type type,
			   u32 alias_id)
{
	int i;

	for (i = 0; i < mdp->mdp_data->comp_data_len; i++)
		if (mdp->mdp_data->comp_data[i].match.type == type &&
		    mdp->mdp_data->comp_data[i].match.alias_id == alias_id)
			return i;
	return -ENODEV;
}

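/*
 * Power on a component (runtime PM for DMA-capable types only) and
 * enable all of its clocks, rolling the clocks back on failure.
 */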
int mdp_comp_clock_on(struct device *dev, struct mdp_comp *comp)
{
	int i, ret;

	/* Only DMA capable components need the pm control */
	if (comp->comp_dev && is_dma_capable(comp->type)) {
		ret = pm_runtime_resume_and_get(comp->comp_dev);
		if (ret < 0) {
			dev_err(dev,
				"Failed to get power, err %d. type:%d id:%d\n",
				ret, comp->type, comp->inner_id);
			return ret;
		}
	}

	for (i = 0; i < comp->clk_num; i++) {
		if (IS_ERR_OR_NULL(comp->clks[i]))
			continue;
		ret = clk_prepare_enable(comp->clks[i]);
		if (ret) {
			dev_err(dev,
				"Failed to enable clk %d. type:%d id:%d\n",
				i, comp->type, comp->inner_id);
			goto err_revert;
		}
	}

	return 0;

err_revert:
	while (--i >= 0) {
		if (IS_ERR_OR_NULL(comp->clks[i]))
			continue;
		clk_disable_unprepare(comp->clks[i]);
	}
	if (comp->comp_dev && is_dma_capable(comp->type))
		pm_runtime_put_sync(comp->comp_dev);

	return ret;
}

void mdp_comp_clock_off(struct device *dev, struct mdp_comp *comp)
{
	int i;

	for (i = 0; i < comp->clk_num; i++) {
		if (IS_ERR_OR_NULL(comp->clks[i]))
			continue;
		clk_disable_unprepare(comp->clks[i]);
	}

	if (comp->comp_dev && is_dma_capable(comp->type))
		pm_runtime_put(comp->comp_dev);
}

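/*
 * Enable clocks on an array of components. A component whose platform
 * blend data sets aid_clk also has its paired blend component enabled.
 */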
int mdp_comp_clocks_on(struct device *dev, struct mdp_comp *comps, int num)
{
	int i, ret;

	for (i = 0; i < num; i++) {
		struct mdp_dev *m = comps[i].mdp_dev;
		enum mtk_mdp_comp_id id;
		const struct mdp_comp_blend *b;

		/* Bypass the dummy component */
		if (!m)
			continue;

		ret = mdp_comp_clock_on(dev, &comps[i]);
		if (ret)
			return ret;

		id = comps[i].public_id;
		b = &m->mdp_data->comp_data[id].blend;

		if (b && b->aid_clk) {
			ret = mdp_comp_clock_on(dev, m->comp[b->b_id]);
			if (ret)
				return ret;
		}
	}

	return 0;
}

void mdp_comp_clocks_off(struct device *dev, struct mdp_comp *comps, int num)
{
	int i;

	for (i = 0; i < num; i++) {
		struct mdp_dev *m = comps[i].mdp_dev;
		enum mtk_mdp_comp_id id;
		const struct mdp_comp_blend *b;

		/* Bypass the dummy component */
		if (!m)
			continue;

		mdp_comp_clock_off(dev, &comps[i]);

		id = comps[i].public_id;
		b = &m->mdp_data->comp_data[id].blend;

		if (b && b->aid_clk)
			mdp_comp_clock_off(dev, m->comp[b->b_id]);
	}
}

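/*
 * Read the GCE subsys id from the component's CMDQ client register so
 * later register writes can be issued through CMDQ.
 */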
static int mdp_get_subsys_id(struct mdp_dev *mdp, struct device *dev,
			     struct device_node *node, struct mdp_comp *comp)
{
	struct platform_device *comp_pdev;
	struct cmdq_client_reg cmdq_reg;
	int ret = 0;
	int index = 0;

	if (!dev || !node || !comp)
		return -EINVAL;

	comp_pdev = of_find_device_by_node(node);
	if (!comp_pdev) {
		dev_err(dev, "get comp_pdev fail! comp public id=%d, inner id=%d, type=%d\n",
			comp->public_id, comp->inner_id, comp->type);
		return -ENODEV;
	}

	index = mdp->mdp_data->comp_data[comp->public_id].info.dts_reg_ofst;
	ret = cmdq_dev_get_client_reg(&comp_pdev->dev, &cmdq_reg, index);
	if (ret != 0) {
		dev_err(&comp_pdev->dev, "cmdq_dev_get_client_reg fail!\n");
		put_device(&comp_pdev->dev);
		return -EINVAL;
	}

	comp->subsys_id = cmdq_reg.subsys;
	dev_dbg(&comp_pdev->dev, "subsys id=%d\n", cmdq_reg.subsys);
	put_device(&comp_pdev->dev);

	return 0;
}

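/* Record the component's physical register base and iomap its registers */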
static void __mdp_comp_init(struct mdp_dev *mdp, struct device_node *node,
			    struct mdp_comp *comp)
{
	struct resource res;
	phys_addr_t base;
	int index;

	index = mdp->mdp_data->comp_data[comp->public_id].info.dts_reg_ofst;
	if (of_address_to_resource(node, index, &res) < 0)
		base = 0L;
	else
		base = res.start;

	comp->mdp_dev = mdp;
	comp->regs = of_iomap(node, 0);
	comp->reg_base = base;
}

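/*
 * Populate one component from platform data and DT: ops, register base,
 * clocks, CMDQ subsys id and GCE SOF/EOF events. A DMA-capable component
 * without an EOF event is treated as an error.
 */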
static int mdp_comp_init(struct mdp_dev *mdp, struct device_node *node,
			 struct mdp_comp *comp, enum mtk_mdp_comp_id id)
{
	struct device *dev = &mdp->pdev->dev;
	struct platform_device *pdev_c;
	int clk_ofst;
	int i;
	s32 event;

	if (id < 0 || id >= MDP_MAX_COMP_COUNT) {
		dev_err(dev, "Invalid component id %d\n", id);
		return -EINVAL;
	}

	pdev_c = of_find_device_by_node(node);
	if (!pdev_c) {
		dev_warn(dev, "can't find platform device of node:%s\n",
			 node->name);
		return -ENODEV;
	}

	comp->comp_dev = &pdev_c->dev;
	comp->public_id = id;
	comp->type = mdp->mdp_data->comp_data[id].match.type;
	comp->inner_id = mdp->mdp_data->comp_data[id].match.inner_id;
	comp->alias_id = mdp->mdp_data->comp_data[id].match.alias_id;
	comp->ops = mdp_comp_ops[comp->type];
	__mdp_comp_init(mdp, node, comp);

	comp->clk_num = mdp->mdp_data->comp_data[id].info.clk_num;
	comp->clks = devm_kzalloc(dev, sizeof(struct clk *) * comp->clk_num,
				  GFP_KERNEL);
	if (!comp->clks)
		return -ENOMEM;

	clk_ofst = mdp->mdp_data->comp_data[id].info.clk_ofst;

	for (i = 0; i < comp->clk_num; i++) {
		comp->clks[i] = of_clk_get(node, i + clk_ofst);
		if (IS_ERR(comp->clks[i]))
			break;
	}

	mdp_get_subsys_id(mdp, dev, node, comp);

	/* Set GCE SOF event */
	if (is_bypass_gce_event(comp->type) ||
	    of_property_read_u32_index(node, "mediatek,gce-events",
				       MDP_GCE_EVENT_SOF, &event))
		event = MDP_GCE_NO_EVENT;

	comp->gce_event[MDP_GCE_EVENT_SOF] = event;

	/* Set GCE EOF event */
	if (is_dma_capable(comp->type)) {
		if (of_property_read_u32_index(node, "mediatek,gce-events",
					       MDP_GCE_EVENT_EOF, &event)) {
			dev_err(dev, "Component id %d has no EOF\n", id);
			return -EINVAL;
		}
	} else {
		event = MDP_GCE_NO_EVENT;
	}

	comp->gce_event[MDP_GCE_EVENT_EOF] = event;

	return 0;
}

static void mdp_comp_deinit(struct mdp_comp *comp)
{
	if (!comp)
		return;

	if (comp->comp_dev && comp->clks) {
		devm_kfree(&comp->mdp_dev->pdev->dev, comp->clks);
		comp->clks = NULL;
	}

	if (comp->regs)
		iounmap(comp->regs);
}

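/* Allocate one component, initialize it and publish it in mdp->comp[] */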
static struct mdp_comp *mdp_comp_create(struct mdp_dev *mdp,
					struct device_node *node,
					enum mtk_mdp_comp_id id)
{
	struct device *dev = &mdp->pdev->dev;
	struct mdp_comp *comp;
	int ret;

	if (mdp->comp[id])
		return ERR_PTR(-EEXIST);

	comp = devm_kzalloc(dev, sizeof(*comp), GFP_KERNEL);
	if (!comp)
		return ERR_PTR(-ENOMEM);

	ret = mdp_comp_init(mdp, node, comp, id);
	if (ret) {
		devm_kfree(dev, comp);
		return ERR_PTR(ret);
	}
	mdp->comp[id] = comp;
	mdp->comp[id]->mdp_dev = mdp;

	dev_dbg(dev, "%s type:%d alias:%d public id:%d inner id:%d base:%#x regs:%p\n",
		dev->of_node->name, comp->type, comp->alias_id, id, comp->inner_id,
		(u32)comp->reg_base, comp->regs);
	return comp;
}

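/* Create components matched by the platform's sub-component DT table */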
static int mdp_comp_sub_create(struct mdp_dev *mdp)
{
	struct device *dev = &mdp->pdev->dev;
	struct device_node *node, *parent;
	int ret = 0;

	parent = dev->of_node->parent;

	for_each_child_of_node(parent, node) {
		const struct of_device_id *of_id;
		enum mdp_comp_type type;
		int id, alias_id;
		struct mdp_comp *comp;

		of_id = of_match_node(mdp->mdp_data->mdp_sub_comp_dt_ids, node);
		if (!of_id)
			continue;
		if (!of_device_is_available(node)) {
			dev_dbg(dev, "Skipping disabled sub comp. %pOF\n",
				node);
			continue;
		}

		type = (enum mdp_comp_type)(uintptr_t)of_id->data;
		alias_id = mdp_comp_alias_id[type];
		id = mdp_comp_get_id(mdp, type, alias_id);
		if (id < 0) {
			dev_err(dev,
				"Failed to get sub comp id: type %d alias %d\n",
				type, alias_id);
			ret = -EINVAL;
			goto err_free_node;
		}
		mdp_comp_alias_id[type]++;

		comp = mdp_comp_create(mdp, node, id);
		if (IS_ERR(comp)) {
			ret = PTR_ERR(comp);
			goto err_free_node;
		}
	}
	return ret;

err_free_node:
	of_node_put(node);
	return ret;
}

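/* Release all components: disable runtime PM, unmap registers and free */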
void mdp_comp_destroy(struct mdp_dev *mdp)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(mdp->comp); i++) {
		if (mdp->comp[i]) {
			if (is_dma_capable(mdp->comp[i]->type))
				pm_runtime_disable(mdp->comp[i]->comp_dev);
			mdp_comp_deinit(mdp->comp[i]);
			devm_kfree(mdp->comp[i]->comp_dev, mdp->comp[i]);
			mdp->comp[i] = NULL;
		}
	}
}

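/*
 * Scan the sibling nodes of the MDP master for known function blocks,
 * create a component per enabled node and enable runtime PM on the
 * DMA-capable ones, then create the platform sub-components.
 */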
int mdp_comp_config(struct mdp_dev *mdp)
{
	struct device *dev = &mdp->pdev->dev;
	struct device_node *node, *parent;
	int ret;

	memset(mdp_comp_alias_id, 0, sizeof(mdp_comp_alias_id));
	p_id = mdp->mdp_data->mdp_plat_id;

	parent = dev->of_node->parent;
	/* Iterate over sibling MDP function blocks */
	for_each_child_of_node(parent, node) {
		const struct of_device_id *of_id;
		enum mdp_comp_type type;
		int id, alias_id;
		struct mdp_comp *comp;

		of_id = of_match_node(mdp_comp_dt_ids, node);
		if (!of_id)
			continue;

		if (!of_device_is_available(node)) {
			dev_dbg(dev, "Skipping disabled component %pOF\n",
				node);
			continue;
		}

		type = (enum mdp_comp_type)(uintptr_t)of_id->data;
		alias_id = mdp_comp_alias_id[type];
		id = mdp_comp_get_id(mdp, type, alias_id);
		if (id < 0) {
			dev_err(dev,
				"Failed to get component id: type %d alias %d\n",
				type, alias_id);
			continue;
		}
		mdp_comp_alias_id[type]++;

		comp = mdp_comp_create(mdp, node, id);
		if (IS_ERR(comp)) {
			ret = PTR_ERR(comp);
			of_node_put(node);
			goto err_init_comps;
		}

		/* Only DMA capable components need the pm control */
		if (!is_dma_capable(comp->type))
			continue;
		pm_runtime_enable(comp->comp_dev);
	}

	ret = mdp_comp_sub_create(mdp);
	if (ret)
		goto err_init_comps;

	return 0;

err_init_comps:
	mdp_comp_destroy(mdp);
	return ret;
}

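/*
 * Bind a context to the firmware's per-component parameters: resolve
 * the public component id for this platform and hook up the frame's
 * input and output buffers.
 */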
int mdp_comp_ctx_config(struct mdp_dev *mdp, struct mdp_comp_ctx *ctx,
			const struct img_compparam *param,
			const struct img_ipi_frameparam *frame)
{
	struct device *dev = &mdp->pdev->dev;
	enum mtk_mdp_comp_id public_id = MDP_COMP_NONE;
	u32 arg;
	int i, idx;

	if (!param) {
		dev_err(dev, "Invalid component param");
		return -EINVAL;
	}

	if (CFG_CHECK(MT8183, p_id))
		arg = CFG_COMP(MT8183, param, type);
	else if (CFG_CHECK(MT8195, p_id))
		arg = CFG_COMP(MT8195, param, type);
	else
		return -EINVAL;
	public_id = mdp_cfg_get_id_public(mdp, arg);
	if (public_id < 0) {
		dev_err(dev, "Invalid component id %d", public_id);
		return -EINVAL;
	}

	ctx->comp = mdp->comp[public_id];
	if (!ctx->comp) {
		dev_err(dev, "Uninitialized component inner id %d", arg);
		return -EINVAL;
	}

	ctx->param = param;
	if (CFG_CHECK(MT8183, p_id))
		arg = CFG_COMP(MT8183, param, input);
	else if (CFG_CHECK(MT8195, p_id))
		arg = CFG_COMP(MT8195, param, input);
	else
		return -EINVAL;
	ctx->input = &frame->inputs[arg];
	if (CFG_CHECK(MT8183, p_id))
		idx = CFG_COMP(MT8183, param, num_outputs);
	else if (CFG_CHECK(MT8195, p_id))
		idx = CFG_COMP(MT8195, param, num_outputs);
	else
		return -EINVAL;
	for (i = 0; i < idx; i++) {
		if (CFG_CHECK(MT8183, p_id))
			arg = CFG_COMP(MT8183, param, outputs[i]);
		else if (CFG_CHECK(MT8195, p_id))
			arg = CFG_COMP(MT8195, param, outputs[i]);
		else
			return -EINVAL;
		ctx->outputs[i] = &frame->outputs[arg];
	}
	return 0;
}