1 /*
2  * Copyright 2016 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: AMD
23  *
24  */
25 
26 #include <linux/delay.h>
27 #include "dm_services.h"
28 #include "basics/dc_common.h"
29 #include "core_types.h"
30 #include "resource.h"
31 #include "custom_float.h"
32 #include "dcn10_hwseq.h"
33 #include "dcn10/dcn10_hw_sequencer_debug.h"
34 #include "dce/dce_hwseq.h"
35 #include "abm.h"
36 #include "dmcu.h"
37 #include "dcn10/dcn10_optc.h"
38 #include "dcn10/dcn10_dpp.h"
39 #include "dcn10/dcn10_mpc.h"
40 #include "timing_generator.h"
41 #include "opp.h"
42 #include "ipp.h"
43 #include "mpc.h"
44 #include "reg_helper.h"
45 #include "dcn10/dcn10_hubp.h"
46 #include "dcn10/dcn10_hubbub.h"
47 #include "dcn10/dcn10_cm_common.h"
48 #include "dccg.h"
49 #include "clk_mgr.h"
50 #include "link_hwss.h"
51 #include "dpcd_defs.h"
52 #include "dsc.h"
53 #include "dce/dmub_psr.h"
54 #include "dc_dmub_srv.h"
55 #include "dce/dmub_hw_lock_mgr.h"
56 #include "dc_trace.h"
57 #include "dce/dmub_outbox.h"
58 #include "link.h"
59 #include "dc_state_priv.h"
60 
61 #define DC_LOGGER \
62 	dc_logger
63 #define DC_LOGGER_INIT(logger) \
64 	struct dal_logger *dc_logger = logger
65 
66 #define CTX \
67 	hws->ctx
68 #define REG(reg)\
69 	hws->regs->reg
70 
71 #undef FN
72 #define FN(reg_name, field_name) \
73 	hws->shifts->field_name, hws->masks->field_name
74 
75 /* printed field is 17 characters wide; the first two characters are spaces */
76 #define DTN_INFO_MICRO_SEC(ref_cycle) \
77 	print_microsec(dc_ctx, log_ctx, ref_cycle)
78 
79 #define GAMMA_HW_POINTS_NUM 256
80 
81 #define PGFSM_POWER_ON 0
82 #define PGFSM_POWER_OFF 2
83 
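/*
 * Convert a DCHUB reference-clock cycle count to microseconds and print it
 * into the DTN log with three fractional digits.
 */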
84 static void print_microsec(struct dc_context *dc_ctx,
85 			   struct dc_log_buffer_ctx *log_ctx,
86 			   uint32_t ref_cycle)
87 {
88 	const uint32_t ref_clk_mhz = dc_ctx->dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000;
89 	static const unsigned int frac = 1000;
90 	uint32_t us_x10 = (ref_cycle * frac) / ref_clk_mhz;
91 
92 	DTN_INFO("  %11d.%03d",
93 			us_x10 / frac,
94 			us_x10 % frac);
95 }
96 
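/*
 * Lock or unlock every active top pipe in the given state. Bottom pipes,
 * disabled pipes and SubVP phantom pipes are skipped to avoid redundant
 * (un)locking.
 */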
97 void dcn10_lock_all_pipes(struct dc *dc,
98 	struct dc_state *context,
99 	bool lock)
100 {
101 	struct pipe_ctx *pipe_ctx;
102 	struct pipe_ctx *old_pipe_ctx;
103 	struct timing_generator *tg;
104 	int i;
105 
106 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
107 		old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
108 		pipe_ctx = &context->res_ctx.pipe_ctx[i];
109 		tg = pipe_ctx->stream_res.tg;
110 
111 		/*
112 		 * Only lock the top pipe's tg to prevent redundant
113 		 * (un)locking. Also skip if pipe is disabled.
114 		 */
115 		if (pipe_ctx->top_pipe ||
116 		    !pipe_ctx->stream ||
117 		    (!pipe_ctx->plane_state && !old_pipe_ctx->plane_state) ||
118 		    !tg->funcs->is_tg_enabled(tg) ||
119 			dc_state_get_pipe_subvp_type(context, pipe_ctx) == SUBVP_PHANTOM)
120 			continue;
121 
122 		if (lock)
123 			dc->hwss.pipe_control_lock(dc, pipe_ctx, true);
124 		else
125 			dc->hwss.pipe_control_lock(dc, pipe_ctx, false);
126 	}
127 }
128 
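/* Dump the MPC and DPP CRC result registers, when present, to the DTN log. */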
129 static void log_mpc_crc(struct dc *dc,
130 	struct dc_log_buffer_ctx *log_ctx)
131 {
132 	struct dc_context *dc_ctx = dc->ctx;
133 	struct dce_hwseq *hws = dc->hwseq;
134 
135 	if (REG(MPC_CRC_RESULT_GB))
136 		DTN_INFO("MPC_CRC_RESULT_GB:%d MPC_CRC_RESULT_C:%d MPC_CRC_RESULT_AR:%d\n",
137 		REG_READ(MPC_CRC_RESULT_GB), REG_READ(MPC_CRC_RESULT_C), REG_READ(MPC_CRC_RESULT_AR));
138 	if (REG(DPP_TOP0_DPP_CRC_VAL_B_A))
139 		DTN_INFO("DPP_TOP0_DPP_CRC_VAL_B_A:%d DPP_TOP0_DPP_CRC_VAL_R_G:%d\n",
140 		REG_READ(DPP_TOP0_DPP_CRC_VAL_B_A), REG_READ(DPP_TOP0_DPP_CRC_VAL_R_G));
141 }
142 
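/*
 * Log the four HUBBUB watermark sets (urgent, self-refresh enter/exit and
 * DRAM clock change) in microseconds.
 */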
143 static void dcn10_log_hubbub_state(struct dc *dc,
144 				   struct dc_log_buffer_ctx *log_ctx)
145 {
146 	struct dc_context *dc_ctx = dc->ctx;
147 	struct dcn_hubbub_wm wm;
148 	int i;
149 
150 	memset(&wm, 0, sizeof(struct dcn_hubbub_wm));
151 	dc->res_pool->hubbub->funcs->wm_read_state(dc->res_pool->hubbub, &wm);
152 
153 	DTN_INFO("HUBBUB WM:      data_urgent  pte_meta_urgent"
154 			"         sr_enter          sr_exit  dram_clk_change\n");
155 
156 	for (i = 0; i < 4; i++) {
157 		struct dcn_hubbub_wm_set *s;
158 
159 		s = &wm.sets[i];
160 		DTN_INFO("WM_Set[%d]:", s->wm_set);
161 		DTN_INFO_MICRO_SEC(s->data_urgent);
162 		DTN_INFO_MICRO_SEC(s->pte_meta_urgent);
163 		DTN_INFO_MICRO_SEC(s->sr_enter);
164 		DTN_INFO_MICRO_SEC(s->sr_exit);
165 		DTN_INFO_MICRO_SEC(s->dram_clk_change);
166 		DTN_INFO("\n");
167 	}
168 
169 	DTN_INFO("\n");
170 }
171 
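/*
 * Log the per-HUBP surface, RQ, DLG and TTU register state for every
 * non-blanked pipe.
 */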
172 static void dcn10_log_hubp_states(struct dc *dc, void *log_ctx)
173 {
174 	struct dc_context *dc_ctx = dc->ctx;
175 	struct resource_pool *pool = dc->res_pool;
176 	int i;
177 
178 	DTN_INFO(
179 		"HUBP:  format  addr_hi  width  height  rot  mir  sw_mode  dcc_en  blank_en  clock_en  ttu_dis  underflow   min_ttu_vblank       qos_low_wm      qos_high_wm\n");
180 	for (i = 0; i < pool->pipe_count; i++) {
181 		struct hubp *hubp = pool->hubps[i];
182 		struct dcn_hubp_state *s = &(TO_DCN10_HUBP(hubp)->state);
183 
184 		hubp->funcs->hubp_read_state(hubp);
185 
186 		if (!s->blank_en) {
187 			DTN_INFO("[%2d]:  %5xh  %6xh  %5d  %6d  %2xh  %2xh  %6xh  %6d  %8d  %8d  %7d  %8xh",
188 					hubp->inst,
189 					s->pixel_format,
190 					s->inuse_addr_hi,
191 					s->viewport_width,
192 					s->viewport_height,
193 					s->rotation_angle,
194 					s->h_mirror_en,
195 					s->sw_mode,
196 					s->dcc_en,
197 					s->blank_en,
198 					s->clock_en,
199 					s->ttu_disable,
200 					s->underflow_status);
201 			DTN_INFO_MICRO_SEC(s->min_ttu_vblank);
202 			DTN_INFO_MICRO_SEC(s->qos_level_low_wm);
203 			DTN_INFO_MICRO_SEC(s->qos_level_high_wm);
204 			DTN_INFO("\n");
205 		}
206 	}
207 
208 	DTN_INFO("\n=========RQ========\n");
209 	DTN_INFO("HUBP:  drq_exp_m  prq_exp_m  mrq_exp_m  crq_exp_m  plane1_ba  L:chunk_s  min_chu_s  meta_ch_s"
210 		"  min_m_c_s  dpte_gr_s  mpte_gr_s  swath_hei  pte_row_h  C:chunk_s  min_chu_s  meta_ch_s"
211 		"  min_m_c_s  dpte_gr_s  mpte_gr_s  swath_hei  pte_row_h\n");
212 	for (i = 0; i < pool->pipe_count; i++) {
213 		struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
214 		struct _vcs_dpi_display_rq_regs_st *rq_regs = &s->rq_regs;
215 
216 		if (!s->blank_en)
217 			DTN_INFO("[%2d]:  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh\n",
218 				pool->hubps[i]->inst, rq_regs->drq_expansion_mode, rq_regs->prq_expansion_mode, rq_regs->mrq_expansion_mode,
219 				rq_regs->crq_expansion_mode, rq_regs->plane1_base_address, rq_regs->rq_regs_l.chunk_size,
220 				rq_regs->rq_regs_l.min_chunk_size, rq_regs->rq_regs_l.meta_chunk_size,
221 				rq_regs->rq_regs_l.min_meta_chunk_size, rq_regs->rq_regs_l.dpte_group_size,
222 				rq_regs->rq_regs_l.mpte_group_size, rq_regs->rq_regs_l.swath_height,
223 				rq_regs->rq_regs_l.pte_row_height_linear, rq_regs->rq_regs_c.chunk_size, rq_regs->rq_regs_c.min_chunk_size,
224 				rq_regs->rq_regs_c.meta_chunk_size, rq_regs->rq_regs_c.min_meta_chunk_size,
225 				rq_regs->rq_regs_c.dpte_group_size, rq_regs->rq_regs_c.mpte_group_size,
226 				rq_regs->rq_regs_c.swath_height, rq_regs->rq_regs_c.pte_row_height_linear);
227 	}
228 
229 	DTN_INFO("========DLG========\n");
230 	DTN_INFO("HUBP:  rc_hbe     dlg_vbe    min_d_y_n  rc_per_ht  rc_x_a_s "
231 			"  dst_y_a_s  dst_y_pf   dst_y_vvb  dst_y_rvb  dst_y_vfl  dst_y_rfl  rf_pix_fq"
232 			"  vratio_pf  vrat_pf_c  rc_pg_vbl  rc_pg_vbc  rc_mc_vbl  rc_mc_vbc  rc_pg_fll"
233 			"  rc_pg_flc  rc_mc_fll  rc_mc_flc  pr_nom_l   pr_nom_c   rc_pg_nl   rc_pg_nc "
234 			"  mr_nom_l   mr_nom_c   rc_mc_nl   rc_mc_nc   rc_ld_pl   rc_ld_pc   rc_ld_l  "
235 			"  rc_ld_c    cha_cur0   ofst_cur1  cha_cur1   vr_af_vc0  ddrq_limt  x_rt_dlay"
236 			"  x_rp_dlay  x_rr_sfl  rc_td_grp\n");
237 
238 	for (i = 0; i < pool->pipe_count; i++) {
239 		struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
240 		struct _vcs_dpi_display_dlg_regs_st *dlg_regs = &s->dlg_attr;
241 
242 		if (!s->blank_en)
243 			DTN_INFO("[%2d]:  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh"
244 				"  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh"
245 				"  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh %xh\n",
246 				pool->hubps[i]->inst, dlg_regs->refcyc_h_blank_end, dlg_regs->dlg_vblank_end, dlg_regs->min_dst_y_next_start,
247 				dlg_regs->refcyc_per_htotal, dlg_regs->refcyc_x_after_scaler, dlg_regs->dst_y_after_scaler,
248 				dlg_regs->dst_y_prefetch, dlg_regs->dst_y_per_vm_vblank, dlg_regs->dst_y_per_row_vblank,
249 				dlg_regs->dst_y_per_vm_flip, dlg_regs->dst_y_per_row_flip, dlg_regs->ref_freq_to_pix_freq,
250 				dlg_regs->vratio_prefetch, dlg_regs->vratio_prefetch_c, dlg_regs->refcyc_per_pte_group_vblank_l,
251 				dlg_regs->refcyc_per_pte_group_vblank_c, dlg_regs->refcyc_per_meta_chunk_vblank_l,
252 				dlg_regs->refcyc_per_meta_chunk_vblank_c, dlg_regs->refcyc_per_pte_group_flip_l,
253 				dlg_regs->refcyc_per_pte_group_flip_c, dlg_regs->refcyc_per_meta_chunk_flip_l,
254 				dlg_regs->refcyc_per_meta_chunk_flip_c, dlg_regs->dst_y_per_pte_row_nom_l,
255 				dlg_regs->dst_y_per_pte_row_nom_c, dlg_regs->refcyc_per_pte_group_nom_l,
256 				dlg_regs->refcyc_per_pte_group_nom_c, dlg_regs->dst_y_per_meta_row_nom_l,
257 				dlg_regs->dst_y_per_meta_row_nom_c, dlg_regs->refcyc_per_meta_chunk_nom_l,
258 				dlg_regs->refcyc_per_meta_chunk_nom_c, dlg_regs->refcyc_per_line_delivery_pre_l,
259 				dlg_regs->refcyc_per_line_delivery_pre_c, dlg_regs->refcyc_per_line_delivery_l,
260 				dlg_regs->refcyc_per_line_delivery_c, dlg_regs->chunk_hdl_adjust_cur0, dlg_regs->dst_y_offset_cur1,
261 				dlg_regs->chunk_hdl_adjust_cur1, dlg_regs->vready_after_vcount0, dlg_regs->dst_y_delta_drq_limit,
262 				dlg_regs->xfc_reg_transfer_delay, dlg_regs->xfc_reg_precharge_delay,
263 				dlg_regs->xfc_reg_remote_surface_flip_latency, dlg_regs->refcyc_per_tdlut_group);
264 	}
265 
266 	DTN_INFO("========TTU========\n");
267 	DTN_INFO("HUBP:  qos_ll_wm  qos_lh_wm  mn_ttu_vb  qos_l_flp  rc_rd_p_l  rc_rd_l    rc_rd_p_c"
268 			"  rc_rd_c    rc_rd_c0   rc_rd_pc0  rc_rd_c1   rc_rd_pc1  qos_lf_l   qos_rds_l"
269 			"  qos_lf_c   qos_rds_c  qos_lf_c0  qos_rds_c0 qos_lf_c1  qos_rds_c1\n");
270 	for (i = 0; i < pool->pipe_count; i++) {
271 		struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
272 		struct _vcs_dpi_display_ttu_regs_st *ttu_regs = &s->ttu_attr;
273 
274 		if (!s->blank_en)
275 			DTN_INFO("[%2d]:  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh\n",
276 				pool->hubps[i]->inst, ttu_regs->qos_level_low_wm, ttu_regs->qos_level_high_wm, ttu_regs->min_ttu_vblank,
277 				ttu_regs->qos_level_flip, ttu_regs->refcyc_per_req_delivery_pre_l, ttu_regs->refcyc_per_req_delivery_l,
278 				ttu_regs->refcyc_per_req_delivery_pre_c, ttu_regs->refcyc_per_req_delivery_c, ttu_regs->refcyc_per_req_delivery_cur0,
279 				ttu_regs->refcyc_per_req_delivery_pre_cur0, ttu_regs->refcyc_per_req_delivery_cur1,
280 				ttu_regs->refcyc_per_req_delivery_pre_cur1, ttu_regs->qos_level_fixed_l, ttu_regs->qos_ramp_disable_l,
281 				ttu_regs->qos_level_fixed_c, ttu_regs->qos_ramp_disable_c, ttu_regs->qos_level_fixed_cur0,
282 				ttu_regs->qos_ramp_disable_cur0, ttu_regs->qos_level_fixed_cur1, ttu_regs->qos_ramp_disable_cur1);
283 	}
284 	DTN_INFO("\n");
285 }
286 
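/*
 * Log the DPP gamma/gamut-remap state, the MPCC tree state and the DPP/MPC
 * color capability bits.
 */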
287 static void dcn10_log_color_state(struct dc *dc,
288 				  struct dc_log_buffer_ctx *log_ctx)
289 {
290 	struct dc_context *dc_ctx = dc->ctx;
291 	struct resource_pool *pool = dc->res_pool;
292 	bool is_gamut_remap_available = false;
293 	int i;
294 
295 	DTN_INFO("DPP:    IGAM format    IGAM mode    DGAM mode    RGAM mode"
296 		 "  GAMUT adjust  "
297 		 "C11        C12        C13        C14        "
298 		 "C21        C22        C23        C24        "
299 		 "C31        C32        C33        C34        \n");
300 	for (i = 0; i < pool->pipe_count; i++) {
301 		struct dpp *dpp = pool->dpps[i];
302 		struct dcn_dpp_state s = {0};
303 
304 		dpp->funcs->dpp_read_state(dpp, &s);
305 		if (dpp->funcs->dpp_get_gamut_remap) {
306 			dpp->funcs->dpp_get_gamut_remap(dpp, &s.gamut_remap);
307 			is_gamut_remap_available = true;
308 		}
309 
310 		if (!s.is_enabled)
311 			continue;
312 
313 		DTN_INFO("[%2d]:  %11xh  %11s    %9s    %9s",
314 				dpp->inst,
315 				s.igam_input_format,
316 				(s.igam_lut_mode == 0) ? "BypassFixed" :
317 					((s.igam_lut_mode == 1) ? "BypassFloat" :
318 					((s.igam_lut_mode == 2) ? "RAM" :
319 					((s.igam_lut_mode == 3) ? "RAM" :
320 								 "Unknown"))),
321 				(s.dgam_lut_mode == 0) ? "Bypass" :
322 					((s.dgam_lut_mode == 1) ? "sRGB" :
323 					((s.dgam_lut_mode == 2) ? "Ycc" :
324 					((s.dgam_lut_mode == 3) ? "RAM" :
325 					((s.dgam_lut_mode == 4) ? "RAM" :
326 								 "Unknown")))),
327 				(s.rgam_lut_mode == 0) ? "Bypass" :
328 					((s.rgam_lut_mode == 1) ? "sRGB" :
329 					((s.rgam_lut_mode == 2) ? "Ycc" :
330 					((s.rgam_lut_mode == 3) ? "RAM" :
331 					((s.rgam_lut_mode == 4) ? "RAM" :
332 								 "Unknown")))));
333 		if (is_gamut_remap_available)
334 			DTN_INFO("  %12s  "
335 				 "%010lld %010lld %010lld %010lld "
336 				 "%010lld %010lld %010lld %010lld "
337 				 "%010lld %010lld %010lld %010lld",
338 				 (s.gamut_remap.gamut_adjust_type == 0) ? "Bypass" :
339 					((s.gamut_remap.gamut_adjust_type == 1) ? "HW" : "SW"),
340 				 s.gamut_remap.temperature_matrix[0].value,
341 				 s.gamut_remap.temperature_matrix[1].value,
342 				 s.gamut_remap.temperature_matrix[2].value,
343 				 s.gamut_remap.temperature_matrix[3].value,
344 				 s.gamut_remap.temperature_matrix[4].value,
345 				 s.gamut_remap.temperature_matrix[5].value,
346 				 s.gamut_remap.temperature_matrix[6].value,
347 				 s.gamut_remap.temperature_matrix[7].value,
348 				 s.gamut_remap.temperature_matrix[8].value,
349 				 s.gamut_remap.temperature_matrix[9].value,
350 				 s.gamut_remap.temperature_matrix[10].value,
351 				 s.gamut_remap.temperature_matrix[11].value);
352 
353 		DTN_INFO("\n");
354 	}
355 	DTN_INFO("\n");
356 	DTN_INFO("DPP Color Caps: input_lut_shared:%d  icsc:%d"
357 		 "  dgam_ram:%d  dgam_rom: srgb:%d,bt2020:%d,gamma2_2:%d,pq:%d,hlg:%d"
358 		 "  post_csc:%d  gamcor:%d  dgam_rom_for_yuv:%d  3d_lut:%d"
359 		 "  blnd_lut:%d  oscs:%d\n\n",
360 		 dc->caps.color.dpp.input_lut_shared,
361 		 dc->caps.color.dpp.icsc,
362 		 dc->caps.color.dpp.dgam_ram,
363 		 dc->caps.color.dpp.dgam_rom_caps.srgb,
364 		 dc->caps.color.dpp.dgam_rom_caps.bt2020,
365 		 dc->caps.color.dpp.dgam_rom_caps.gamma2_2,
366 		 dc->caps.color.dpp.dgam_rom_caps.pq,
367 		 dc->caps.color.dpp.dgam_rom_caps.hlg,
368 		 dc->caps.color.dpp.post_csc,
369 		 dc->caps.color.dpp.gamma_corr,
370 		 dc->caps.color.dpp.dgam_rom_for_yuv,
371 		 dc->caps.color.dpp.hw_3d_lut,
372 		 dc->caps.color.dpp.ogam_ram,
373 		 dc->caps.color.dpp.ocsc);
374 
375 	DTN_INFO("MPCC:  OPP  DPP  MPCCBOT  MODE  ALPHA_MODE  PREMULT  OVERLAP_ONLY  IDLE\n");
376 	for (i = 0; i < pool->mpcc_count; i++) {
377 		struct mpcc_state s = {0};
378 
379 		pool->mpc->funcs->read_mpcc_state(pool->mpc, i, &s);
380 		if (s.opp_id != 0xf)
381 			DTN_INFO("[%2d]:  %2xh  %2xh  %6xh  %4d  %10d  %7d  %12d  %4d\n",
382 				i, s.opp_id, s.dpp_id, s.bot_mpcc_id,
383 				s.mode, s.alpha_mode, s.pre_multiplied_alpha, s.overlap_only,
384 				s.idle);
385 	}
386 	DTN_INFO("\n");
387 	DTN_INFO("MPC Color Caps: gamut_remap:%d, 3dlut:%d, ogam_ram:%d, ocsc:%d\n\n",
388 		 dc->caps.color.mpc.gamut_remap,
389 		 dc->caps.color.mpc.num_3dluts,
390 		 dc->caps.color.mpc.ogam_ram,
391 		 dc->caps.color.mpc.ocsc);
392 }
393 
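/*
 * Top-level hardware state logger: dumps HUBBUB, HUBP, color, OTG, DSC,
 * stream/link encoder and calculated clock state into the DTN log buffer.
 */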
394 void dcn10_log_hw_state(struct dc *dc,
395 			struct dc_log_buffer_ctx *log_ctx)
396 {
397 	struct dc_context *dc_ctx = dc->ctx;
398 	struct resource_pool *pool = dc->res_pool;
399 	int i;
400 
401 	DTN_INFO_BEGIN();
402 
403 	dcn10_log_hubbub_state(dc, log_ctx);
404 
405 	dcn10_log_hubp_states(dc, log_ctx);
406 
407 	if (dc->hwss.log_color_state)
408 		dc->hwss.log_color_state(dc, log_ctx);
409 	else
410 		dcn10_log_color_state(dc, log_ctx);
411 
412 	DTN_INFO("OTG:  v_bs  v_be  v_ss  v_se  vpol  vmax  vmin  vmax_sel  vmin_sel  h_bs  h_be  h_ss  h_se  hpol  htot  vtot  underflow blank_en\n");
413 
414 	for (i = 0; i < pool->timing_generator_count; i++) {
415 		struct timing_generator *tg = pool->timing_generators[i];
416 		struct dcn_otg_state s = {0};
417 		/* Read shared OTG state registers for all DCNx */
418 		optc1_read_otg_state(DCN10TG_FROM_TG(tg), &s);
419 
420 		/*
421 		 * For DCN2 and greater, a register on the OPP is used to
422 		 * determine if the CRTC is blanked instead of the OTG. So use
423 		 * dpg_is_blanked() if it exists; otherwise fall back to the OTG.
424 		 *
425 		 * TODO: Implement DCN-specific read_otg_state hooks.
426 		 */
427 		if (pool->opps[i]->funcs->dpg_is_blanked)
428 			s.blank_enabled = pool->opps[i]->funcs->dpg_is_blanked(pool->opps[i]);
429 		else
430 			s.blank_enabled = tg->funcs->is_blanked(tg);
431 
432 		//only print if OTG master is enabled
433 		if ((s.otg_enabled & 1) == 0)
434 			continue;
435 
436 		DTN_INFO("[%d]: %5d %5d %5d %5d %5d %5d %5d %9d %9d %5d %5d %5d %5d %5d %5d %5d  %9d %8d\n",
437 				tg->inst,
438 				s.v_blank_start,
439 				s.v_blank_end,
440 				s.v_sync_a_start,
441 				s.v_sync_a_end,
442 				s.v_sync_a_pol,
443 				s.v_total_max,
444 				s.v_total_min,
445 				s.v_total_max_sel,
446 				s.v_total_min_sel,
447 				s.h_blank_start,
448 				s.h_blank_end,
449 				s.h_sync_a_start,
450 				s.h_sync_a_end,
451 				s.h_sync_a_pol,
452 				s.h_total,
453 				s.v_total,
454 				s.underflow_occurred_status,
455 				s.blank_enabled);
456 
457 		// Clear underflow for debug purposes
458 		// We want to keep underflow sticky bit on for the longevity tests outside of test environment.
459 		// This function is called only from Windows or Diags test environment, hence it's safe to clear
460 		// it from here without affecting the original intent.
461 		tg->funcs->clear_optc_underflow(tg);
462 	}
463 	DTN_INFO("\n");
464 
465 	// dcn_dsc_state struct field bytes_per_pixel was renamed to bits_per_pixel
466 	// TODO: Update golden log header to reflect this name change
467 	DTN_INFO("DSC: CLOCK_EN  SLICE_WIDTH  Bytes_pp\n");
468 	for (i = 0; i < pool->res_cap->num_dsc; i++) {
469 		struct display_stream_compressor *dsc = pool->dscs[i];
470 		struct dcn_dsc_state s = {0};
471 
472 		dsc->funcs->dsc_read_state(dsc, &s);
473 		DTN_INFO("[%d]: %-9d %-12d %-10d\n",
474 		dsc->inst,
475 			s.dsc_clock_en,
476 			s.dsc_slice_width,
477 			s.dsc_bits_per_pixel);
478 		DTN_INFO("\n");
479 	}
480 	DTN_INFO("\n");
481 
482 	DTN_INFO("S_ENC: DSC_MODE  SEC_GSP7_LINE_NUM"
483 			"  VBID6_LINE_REFERENCE  VBID6_LINE_NUM  SEC_GSP7_ENABLE  SEC_STREAM_ENABLE\n");
484 	for (i = 0; i < pool->stream_enc_count; i++) {
485 		struct stream_encoder *enc = pool->stream_enc[i];
486 		struct enc_state s = {0};
487 
488 		if (enc->funcs->enc_read_state) {
489 			enc->funcs->enc_read_state(enc, &s);
490 			DTN_INFO("[%-3d]: %-9d %-18d %-21d %-15d %-16d %-17d\n",
491 				enc->id,
492 				s.dsc_mode,
493 				s.sec_gsp_pps_line_num,
494 				s.vbid6_line_reference,
495 				s.vbid6_line_num,
496 				s.sec_gsp_pps_enable,
497 				s.sec_stream_enable);
498 			DTN_INFO("\n");
499 		}
500 	}
501 	DTN_INFO("\n");
502 
503 	DTN_INFO("L_ENC: DPHY_FEC_EN  DPHY_FEC_READY_SHADOW  DPHY_FEC_ACTIVE_STATUS  DP_LINK_TRAINING_COMPLETE\n");
504 	for (i = 0; i < dc->link_count; i++) {
505 		struct link_encoder *lenc = dc->links[i]->link_enc;
506 
507 		struct link_enc_state s = {0};
508 
509 		if (lenc && lenc->funcs->read_state) {
510 			lenc->funcs->read_state(lenc, &s);
511 			DTN_INFO("[%-3d]: %-12d %-22d %-22d %-25d\n",
512 				i,
513 				s.dphy_fec_en,
514 				s.dphy_fec_ready_shadow,
515 				s.dphy_fec_active_status,
516 				s.dp_link_training_complete);
517 			DTN_INFO("\n");
518 		}
519 	}
520 	DTN_INFO("\n");
521 
522 	DTN_INFO("\nCALCULATED Clocks: dcfclk_khz:%d  dcfclk_deep_sleep_khz:%d  dispclk_khz:%d\n"
523 		"dppclk_khz:%d  max_supported_dppclk_khz:%d  fclk_khz:%d  socclk_khz:%d\n\n",
524 			dc->current_state->bw_ctx.bw.dcn.clk.dcfclk_khz,
525 			dc->current_state->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz,
526 			dc->current_state->bw_ctx.bw.dcn.clk.dispclk_khz,
527 			dc->current_state->bw_ctx.bw.dcn.clk.dppclk_khz,
528 			dc->current_state->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz,
529 			dc->current_state->bw_ctx.bw.dcn.clk.fclk_khz,
530 			dc->current_state->bw_ctx.bw.dcn.clk.socclk_khz);
531 
532 	log_mpc_crc(dc, log_ctx);
533 
534 	{
535 		if (pool->hpo_dp_stream_enc_count > 0) {
536 			DTN_INFO("DP HPO S_ENC:  Enabled  OTG   Format   Depth   Vid   SDP   Compressed  Link\n");
537 			for (i = 0; i < pool->hpo_dp_stream_enc_count; i++) {
538 				struct hpo_dp_stream_encoder_state hpo_dp_se_state = {0};
539 				struct hpo_dp_stream_encoder *hpo_dp_stream_enc = pool->hpo_dp_stream_enc[i];
540 
541 				if (hpo_dp_stream_enc && hpo_dp_stream_enc->funcs->read_state) {
542 					hpo_dp_stream_enc->funcs->read_state(hpo_dp_stream_enc, &hpo_dp_se_state);
543 
544 					DTN_INFO("[%d]:                 %d    %d   %6s       %d     %d     %d            %d     %d\n",
545 							hpo_dp_stream_enc->id - ENGINE_ID_HPO_DP_0,
546 							hpo_dp_se_state.stream_enc_enabled,
547 							hpo_dp_se_state.otg_inst,
548 							(hpo_dp_se_state.pixel_encoding == 0) ? "4:4:4" :
549 									((hpo_dp_se_state.pixel_encoding == 1) ? "4:2:2" :
550 									(hpo_dp_se_state.pixel_encoding == 2) ? "4:2:0" : "Y-Only"),
551 							(hpo_dp_se_state.component_depth == 0) ? 6 :
552 									((hpo_dp_se_state.component_depth == 1) ? 8 :
553 									(hpo_dp_se_state.component_depth == 2) ? 10 : 12),
554 							hpo_dp_se_state.vid_stream_enabled,
555 							hpo_dp_se_state.sdp_enabled,
556 							hpo_dp_se_state.compressed_format,
557 							hpo_dp_se_state.mapped_to_link_enc);
558 				}
559 			}
560 
561 			DTN_INFO("\n");
562 		}
563 
564 		/* log DP HPO L_ENC section if any hpo_dp_link_enc exists */
565 		if (pool->hpo_dp_link_enc_count) {
566 			DTN_INFO("DP HPO L_ENC:  Enabled  Mode   Lanes   Stream  Slots   VC Rate X    VC Rate Y\n");
567 
568 			for (i = 0; i < pool->hpo_dp_link_enc_count; i++) {
569 				struct hpo_dp_link_encoder *hpo_dp_link_enc = pool->hpo_dp_link_enc[i];
570 				struct hpo_dp_link_enc_state hpo_dp_le_state = {0};
571 
572 				if (hpo_dp_link_enc->funcs->read_state) {
573 					hpo_dp_link_enc->funcs->read_state(hpo_dp_link_enc, &hpo_dp_le_state);
574 					DTN_INFO("[%d]:                 %d  %6s     %d        %d      %d     %d     %d\n",
575 							hpo_dp_link_enc->inst,
576 							hpo_dp_le_state.link_enc_enabled,
577 							(hpo_dp_le_state.link_mode == 0) ? "TPS1" :
578 									(hpo_dp_le_state.link_mode == 1) ? "TPS2" :
579 									(hpo_dp_le_state.link_mode == 2) ? "ACTIVE" : "TEST",
580 							hpo_dp_le_state.lane_count,
581 							hpo_dp_le_state.stream_src[0],
582 							hpo_dp_le_state.slot_count[0],
583 							hpo_dp_le_state.vc_rate_x[0],
584 							hpo_dp_le_state.vc_rate_y[0]);
585 					DTN_INFO("\n");
586 				}
587 			}
588 
589 			DTN_INFO("\n");
590 		}
591 	}
592 
593 	DTN_INFO_END();
594 }
595 
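/*
 * Report whether an OPTC or HUBP underflow occurred on this pipe, clearing
 * whichever status was found.
 */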
596 bool dcn10_did_underflow_occur(struct dc *dc, struct pipe_ctx *pipe_ctx)
597 {
598 	struct hubp *hubp = pipe_ctx->plane_res.hubp;
599 	struct timing_generator *tg = pipe_ctx->stream_res.tg;
600 
601 	if (tg->funcs->is_optc_underflow_occurred(tg)) {
602 		tg->funcs->clear_optc_underflow(tg);
603 		return true;
604 	}
605 
606 	if (hubp->funcs->hubp_get_underflow_status(hubp)) {
607 		hubp->funcs->hubp_clear_underflow(hubp);
608 		return true;
609 	}
610 	return false;
611 }
612 
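/*
 * When plane power gating is enabled, let the PGFSM control the HUBP and DPP
 * power domains; otherwise force all of them on.
 */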
613 void dcn10_enable_power_gating_plane(
614 	struct dce_hwseq *hws,
615 	bool enable)
616 {
617 	bool force_on = true; /* disable power gating */
618 
619 	if (enable)
620 		force_on = false;
621 
622 	/* DCHUBP0/1/2/3 */
623 	REG_UPDATE(DOMAIN0_PG_CONFIG, DOMAIN0_POWER_FORCEON, force_on);
624 	REG_UPDATE(DOMAIN2_PG_CONFIG, DOMAIN2_POWER_FORCEON, force_on);
625 	REG_UPDATE(DOMAIN4_PG_CONFIG, DOMAIN4_POWER_FORCEON, force_on);
626 	REG_UPDATE(DOMAIN6_PG_CONFIG, DOMAIN6_POWER_FORCEON, force_on);
627 
628 	/* DPP0/1/2/3 */
629 	REG_UPDATE(DOMAIN1_PG_CONFIG, DOMAIN1_POWER_FORCEON, force_on);
630 	REG_UPDATE(DOMAIN3_PG_CONFIG, DOMAIN3_POWER_FORCEON, force_on);
631 	REG_UPDATE(DOMAIN5_PG_CONFIG, DOMAIN5_POWER_FORCEON, force_on);
632 	REG_UPDATE(DOMAIN7_PG_CONFIG, DOMAIN7_POWER_FORCEON, force_on);
633 }
634 
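/*
 * If any display controller is still in legacy VGA mode, disable VGA and
 * kick off the VGA_TEST sequence so the DCHUBP timing is updated correctly
 * (see the HW engineer's note inside).
 */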
635 void dcn10_disable_vga(
636 	struct dce_hwseq *hws)
637 {
638 	unsigned int in_vga1_mode = 0;
639 	unsigned int in_vga2_mode = 0;
640 	unsigned int in_vga3_mode = 0;
641 	unsigned int in_vga4_mode = 0;
642 
643 	REG_GET(D1VGA_CONTROL, D1VGA_MODE_ENABLE, &in_vga1_mode);
644 	REG_GET(D2VGA_CONTROL, D2VGA_MODE_ENABLE, &in_vga2_mode);
645 	REG_GET(D3VGA_CONTROL, D3VGA_MODE_ENABLE, &in_vga3_mode);
646 	REG_GET(D4VGA_CONTROL, D4VGA_MODE_ENABLE, &in_vga4_mode);
647 
648 	if (in_vga1_mode == 0 && in_vga2_mode == 0 &&
649 			in_vga3_mode == 0 && in_vga4_mode == 0)
650 		return;
651 
652 	REG_WRITE(D1VGA_CONTROL, 0);
653 	REG_WRITE(D2VGA_CONTROL, 0);
654 	REG_WRITE(D3VGA_CONTROL, 0);
655 	REG_WRITE(D4VGA_CONTROL, 0);
656 
657 	/* HW Engineer's Notes:
658 	 *  During switch from vga->extended, if we set the VGA_TEST_ENABLE and
659 	 *  then hit the VGA_TEST_RENDER_START, then the DCHUBP timing gets updated correctly.
660 	 *
661 	 *  Then vBIOS will have it poll for the VGA_TEST_RENDER_DONE and unset
662 	 *  VGA_TEST_ENABLE, to leave it in the same state as before.
663 	 */
664 	REG_UPDATE(VGA_TEST_CONTROL, VGA_TEST_ENABLE, 1);
665 	REG_UPDATE(VGA_TEST_CONTROL, VGA_TEST_RENDER_START, 1);
666 }
667 
668 /**
669  * dcn10_dpp_pg_control - DPP power gate control.
670  *
671  * @hws: dce_hwseq reference.
672  * @dpp_inst: DPP instance reference.
673  * @power_on: true to power the DPP domain on (power gate disabled), false to power gate it.
674  *
675  * Enable or disable power gate in the specific DPP instance.
676  */
677 void dcn10_dpp_pg_control(
678 		struct dce_hwseq *hws,
679 		unsigned int dpp_inst,
680 		bool power_on)
681 {
682 	uint32_t power_gate = power_on ? 0 : 1;
683 	uint32_t pwr_status = power_on ? PGFSM_POWER_ON : PGFSM_POWER_OFF;
684 
685 	if (hws->ctx->dc->debug.disable_dpp_power_gate)
686 		return;
687 	if (REG(DOMAIN1_PG_CONFIG) == 0)
688 		return;
689 
690 	switch (dpp_inst) {
691 	case 0: /* DPP0 */
692 		REG_UPDATE(DOMAIN1_PG_CONFIG,
693 				DOMAIN1_POWER_GATE, power_gate);
694 
695 		REG_WAIT(DOMAIN1_PG_STATUS,
696 				DOMAIN1_PGFSM_PWR_STATUS, pwr_status,
697 				1, 1000);
698 		break;
699 	case 1: /* DPP1 */
700 		REG_UPDATE(DOMAIN3_PG_CONFIG,
701 				DOMAIN3_POWER_GATE, power_gate);
702 
703 		REG_WAIT(DOMAIN3_PG_STATUS,
704 				DOMAIN3_PGFSM_PWR_STATUS, pwr_status,
705 				1, 1000);
706 		break;
707 	case 2: /* DPP2 */
708 		REG_UPDATE(DOMAIN5_PG_CONFIG,
709 				DOMAIN5_POWER_GATE, power_gate);
710 
711 		REG_WAIT(DOMAIN5_PG_STATUS,
712 				DOMAIN5_PGFSM_PWR_STATUS, pwr_status,
713 				1, 1000);
714 		break;
715 	case 3: /* DPP3 */
716 		REG_UPDATE(DOMAIN7_PG_CONFIG,
717 				DOMAIN7_POWER_GATE, power_gate);
718 
719 		REG_WAIT(DOMAIN7_PG_STATUS,
720 				DOMAIN7_PGFSM_PWR_STATUS, pwr_status,
721 				1, 1000);
722 		break;
723 	default:
724 		BREAK_TO_DEBUGGER();
725 		break;
726 	}
727 }
728 
729 /**
730  * dcn10_hubp_pg_control - HUBP power gate control.
731  *
732  * @hws: dce_hwseq reference.
733  * @hubp_inst: HUBP instance reference.
734  * @power_on: true to power the HUBP domain on (power gate disabled), false to power gate it.
735  *
736  * Enable or disable power gate in the specific HUBP instance.
737  */
738 void dcn10_hubp_pg_control(
739 		struct dce_hwseq *hws,
740 		unsigned int hubp_inst,
741 		bool power_on)
742 {
743 	uint32_t power_gate = power_on ? 0 : 1;
744 	uint32_t pwr_status = power_on ? PGFSM_POWER_ON : PGFSM_POWER_OFF;
745 
746 	if (hws->ctx->dc->debug.disable_hubp_power_gate)
747 		return;
748 	if (REG(DOMAIN0_PG_CONFIG) == 0)
749 		return;
750 
751 	switch (hubp_inst) {
752 	case 0: /* DCHUBP0 */
753 		REG_UPDATE(DOMAIN0_PG_CONFIG,
754 				DOMAIN0_POWER_GATE, power_gate);
755 
756 		REG_WAIT(DOMAIN0_PG_STATUS,
757 				DOMAIN0_PGFSM_PWR_STATUS, pwr_status,
758 				1, 1000);
759 		break;
760 	case 1: /* DCHUBP1 */
761 		REG_UPDATE(DOMAIN2_PG_CONFIG,
762 				DOMAIN2_POWER_GATE, power_gate);
763 
764 		REG_WAIT(DOMAIN2_PG_STATUS,
765 				DOMAIN2_PGFSM_PWR_STATUS, pwr_status,
766 				1, 1000);
767 		break;
768 	case 2: /* DCHUBP2 */
769 		REG_UPDATE(DOMAIN4_PG_CONFIG,
770 				DOMAIN4_POWER_GATE, power_gate);
771 
772 		REG_WAIT(DOMAIN4_PG_STATUS,
773 				DOMAIN4_PGFSM_PWR_STATUS, pwr_status,
774 				1, 1000);
775 		break;
776 	case 3: /* DCHUBP3 */
777 		REG_UPDATE(DOMAIN6_PG_CONFIG,
778 				DOMAIN6_POWER_GATE, power_gate);
779 
780 		REG_WAIT(DOMAIN6_PG_STATUS,
781 				DOMAIN6_PGFSM_PWR_STATUS, pwr_status,
782 				1, 1000);
783 		break;
784 	default:
785 		BREAK_TO_DEBUGGER();
786 		break;
787 	}
788 }
789 
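/*
 * Enable the plane's DPP root clock (when supported) and un-gate its DPP and
 * HUBP power domains.
 */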
790 static void power_on_plane_resources(
791 	struct dce_hwseq *hws,
792 	int plane_id)
793 {
794 	DC_LOGGER_INIT(hws->ctx->logger);
795 
796 	if (hws->funcs.dpp_root_clock_control)
797 		hws->funcs.dpp_root_clock_control(hws, plane_id, true);
798 
799 	if (REG(DC_IP_REQUEST_CNTL)) {
800 		REG_SET(DC_IP_REQUEST_CNTL, 0,
801 				IP_REQUEST_EN, 1);
802 
803 		if (hws->funcs.dpp_pg_control)
804 			hws->funcs.dpp_pg_control(hws, plane_id, true);
805 
806 		if (hws->funcs.hubp_pg_control)
807 			hws->funcs.hubp_pg_control(hws, plane_id, true);
808 
809 		REG_SET(DC_IP_REQUEST_CNTL, 0,
810 				IP_REQUEST_EN, 0);
811 		DC_LOG_DEBUG(
812 				"Un-gated front end for pipe %d\n", plane_id);
813 	}
814 }
815 
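/*
 * Revert the DEGVIDCN10_253 workaround: blank HUBP0, power gate it again and
 * clear the applied flag.
 */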
816 static void undo_DEGVIDCN10_253_wa(struct dc *dc)
817 {
818 	struct dce_hwseq *hws = dc->hwseq;
819 	struct hubp *hubp = dc->res_pool->hubps[0];
820 
821 	if (!hws->wa_state.DEGVIDCN10_253_applied)
822 		return;
823 
824 	hubp->funcs->set_blank(hubp, true);
825 
826 	REG_SET(DC_IP_REQUEST_CNTL, 0,
827 			IP_REQUEST_EN, 1);
828 
829 	hws->funcs.hubp_pg_control(hws, 0, false);
830 	REG_SET(DC_IP_REQUEST_CNTL, 0,
831 			IP_REQUEST_EN, 0);
832 
833 	hws->wa_state.DEGVIDCN10_253_applied = false;
834 }
835 
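/*
 * DEGVIDCN10_253 workaround: once every HUBP is power gated, power HUBP0
 * back on and clear its blank enable so stutter can still be entered.
 */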
836 static void apply_DEGVIDCN10_253_wa(struct dc *dc)
837 {
838 	struct dce_hwseq *hws = dc->hwseq;
839 	struct hubp *hubp = dc->res_pool->hubps[0];
840 	int i;
841 
842 	if (dc->debug.disable_stutter)
843 		return;
844 
845 	if (!hws->wa.DEGVIDCN10_253)
846 		return;
847 
848 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
849 		if (!dc->res_pool->hubps[i]->power_gated)
850 			return;
851 	}
852 
853 	/* All pipes are power gated; apply the workaround to enable stutter. */
854 
855 	REG_SET(DC_IP_REQUEST_CNTL, 0,
856 			IP_REQUEST_EN, 1);
857 
858 	hws->funcs.hubp_pg_control(hws, 0, true);
859 	REG_SET(DC_IP_REQUEST_CNTL, 0,
860 			IP_REQUEST_EN, 0);
861 
862 	hubp->funcs->set_hubp_blank_en(hubp, false);
863 	hws->wa_state.DEGVIDCN10_253_applied = true;
864 }
865 
866 void dcn10_bios_golden_init(struct dc *dc)
867 {
868 	struct dce_hwseq *hws = dc->hwseq;
869 	struct dc_bios *bp = dc->ctx->dc_bios;
870 	int i;
871 	bool allow_self_fresh_force_enable = true;
872 
873 	if (hws->funcs.s0i3_golden_init_wa && hws->funcs.s0i3_golden_init_wa(dc))
874 		return;
875 
876 	if (dc->res_pool->hubbub->funcs->is_allow_self_refresh_enabled)
877 		allow_self_fresh_force_enable =
878 				dc->res_pool->hubbub->funcs->is_allow_self_refresh_enabled(dc->res_pool->hubbub);
879 
880 
881 	/* WA to make DF sleep when idle after resume from S0i3.
882 	 * DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE is set to 1 by the
883 	 * command table. If it was 0 before the command table was called
884 	 * and changed to 1 afterwards, it should be set
885 	 * back to 0.
886 	 */
887 
888 	/* initialize dcn global */
889 	bp->funcs->enable_disp_power_gating(bp,
890 			CONTROLLER_ID_D0, ASIC_PIPE_INIT);
891 
892 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
893 		/* initialize dcn per pipe */
894 		bp->funcs->enable_disp_power_gating(bp,
895 				CONTROLLER_ID_D0 + i, ASIC_PIPE_DISABLE);
896 	}
897 
898 	if (dc->res_pool->hubbub->funcs->allow_self_refresh_control)
899 		if (allow_self_fresh_force_enable == false &&
900 				dc->res_pool->hubbub->funcs->is_allow_self_refresh_enabled(dc->res_pool->hubbub))
901 			dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub,
902 										!dc->res_pool->hubbub->ctx->dc->debug.disable_stutter);
903 
904 }
905 
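/*
 * Workaround for a false OPTC underflow: wait for pending MPCC disconnects
 * on all pipes driving this stream, enable blank-data double buffering, and
 * clear any underflow that was raised only during this sequence.
 */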
906 static void false_optc_underflow_wa(
907 		struct dc *dc,
908 		const struct dc_stream_state *stream,
909 		struct timing_generator *tg)
910 {
911 	int i;
912 	bool underflow;
913 
914 	if (!dc->hwseq->wa.false_optc_underflow)
915 		return;
916 
917 	underflow = tg->funcs->is_optc_underflow_occurred(tg);
918 
919 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
920 		struct pipe_ctx *old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
921 
922 		if (old_pipe_ctx->stream != stream)
923 			continue;
924 
925 		dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, old_pipe_ctx);
926 	}
927 
928 	if (tg->funcs->set_blank_data_double_buffer)
929 		tg->funcs->set_blank_data_double_buffer(tg, true);
930 
931 	if (tg->funcs->is_optc_underflow_occurred(tg) && !underflow)
932 		tg->funcs->clear_optc_underflow(tg);
933 }
934 
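/*
 * Return the largest vready_offset among this pipe and every pipe blended
 * (top/bottom) or ODM-combined (prev/next) with it.
 */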
935 static int calculate_vready_offset_for_group(struct pipe_ctx *pipe)
936 {
937 	struct pipe_ctx *other_pipe;
938 	int vready_offset = pipe->pipe_dlg_param.vready_offset;
939 
940 	/* Always use the largest vready_offset of all connected pipes */
941 	for (other_pipe = pipe->bottom_pipe; other_pipe != NULL; other_pipe = other_pipe->bottom_pipe) {
942 		if (other_pipe->pipe_dlg_param.vready_offset > vready_offset)
943 			vready_offset = other_pipe->pipe_dlg_param.vready_offset;
944 	}
945 	for (other_pipe = pipe->top_pipe; other_pipe != NULL; other_pipe = other_pipe->top_pipe) {
946 		if (other_pipe->pipe_dlg_param.vready_offset > vready_offset)
947 			vready_offset = other_pipe->pipe_dlg_param.vready_offset;
948 	}
949 	for (other_pipe = pipe->next_odm_pipe; other_pipe != NULL; other_pipe = other_pipe->next_odm_pipe) {
950 		if (other_pipe->pipe_dlg_param.vready_offset > vready_offset)
951 			vready_offset = other_pipe->pipe_dlg_param.vready_offset;
952 	}
953 	for (other_pipe = pipe->prev_odm_pipe; other_pipe != NULL; other_pipe = other_pipe->prev_odm_pipe) {
954 		if (other_pipe->pipe_dlg_param.vready_offset > vready_offset)
955 			vready_offset = other_pipe->pipe_dlg_param.vready_offset;
956 	}
957 
958 	return vready_offset;
959 }
960 
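/*
 * Program the OTG back end for a stream on its top pipe: enable the OPTC
 * clock, program the pixel clock and timing, set the OTG blank color and
 * enable the CRTC. Child pipes share the parent's back end and return early.
 */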
961 enum dc_status dcn10_enable_stream_timing(
962 		struct pipe_ctx *pipe_ctx,
963 		struct dc_state *context,
964 		struct dc *dc)
965 {
966 	struct dc_stream_state *stream = pipe_ctx->stream;
967 	enum dc_color_space color_space;
968 	struct tg_color black_color = {0};
969 
970 	/* By the caller's loop, pipe0 is the parent pipe and is handled first.
971 	 * The back end is set up for pipe0; other child pipes share the back
972 	 * end with pipe0, so no programming is needed for them.
973 	 */
974 	if (pipe_ctx->top_pipe != NULL)
975 		return DC_OK;
976 
977 	/* TODO check if timing_changed, disable stream if timing changed */
978 
979 	/* The HW programming guide assumes the display was already disabled
980 	 * by the unplug sequence and that the OTG is stopped.
981 	 */
982 	pipe_ctx->stream_res.tg->funcs->enable_optc_clock(pipe_ctx->stream_res.tg, true);
983 
984 	if (false == pipe_ctx->clock_source->funcs->program_pix_clk(
985 			pipe_ctx->clock_source,
986 			&pipe_ctx->stream_res.pix_clk_params,
987 			dc->link_srv->dp_get_encoding_format(&pipe_ctx->link_config.dp_link_settings),
988 			&pipe_ctx->pll_settings)) {
989 		BREAK_TO_DEBUGGER();
990 		return DC_ERROR_UNEXPECTED;
991 	}
992 
993 	if (dc_is_hdmi_tmds_signal(stream->signal)) {
994 		stream->link->phy_state.symclk_ref_cnts.otg = 1;
995 		if (stream->link->phy_state.symclk_state == SYMCLK_OFF_TX_OFF)
996 			stream->link->phy_state.symclk_state = SYMCLK_ON_TX_OFF;
997 		else
998 			stream->link->phy_state.symclk_state = SYMCLK_ON_TX_ON;
999 	}
1000 
1001 	pipe_ctx->stream_res.tg->funcs->program_timing(
1002 			pipe_ctx->stream_res.tg,
1003 			&stream->timing,
1004 			calculate_vready_offset_for_group(pipe_ctx),
1005 			pipe_ctx->pipe_dlg_param.vstartup_start,
1006 			pipe_ctx->pipe_dlg_param.vupdate_offset,
1007 			pipe_ctx->pipe_dlg_param.vupdate_width,
1008 			pipe_ctx->pipe_dlg_param.pstate_keepout,
1009 			pipe_ctx->stream->signal,
1010 			true);
1011 
1012 #if 0 /* move to after enable_crtc */
1013 	/* TODO: OPP FMT, ABM. etc. should be done here. */
1014 	/* or FPGA now. instance 0 only. TODO: move to opp.c */
1015 
1016 	inst_offset = reg_offsets[pipe_ctx->stream_res.tg->inst].fmt;
1017 
1018 	pipe_ctx->stream_res.opp->funcs->opp_program_fmt(
1019 				pipe_ctx->stream_res.opp,
1020 				&stream->bit_depth_params,
1021 				&stream->clamping);
1022 #endif
1023 	/* program otg blank color */
1024 	color_space = stream->output_color_space;
1025 	color_space_to_black_color(dc, color_space, &black_color);
1026 
1027 	/*
1028 	 * The way 420 is packed, 2 channels carry Y component, 1 channel
1029 	 * alternate between Cb and Cr, so both channels need the pixel
1030 	 * value for Y
1031 	 */
1032 	if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
1033 		black_color.color_r_cr = black_color.color_g_y;
1034 
1035 	if (pipe_ctx->stream_res.tg->funcs->set_blank_color)
1036 		pipe_ctx->stream_res.tg->funcs->set_blank_color(
1037 				pipe_ctx->stream_res.tg,
1038 				&black_color);
1039 
1040 	if (pipe_ctx->stream_res.tg->funcs->is_blanked &&
1041 			!pipe_ctx->stream_res.tg->funcs->is_blanked(pipe_ctx->stream_res.tg)) {
1042 		pipe_ctx->stream_res.tg->funcs->set_blank(pipe_ctx->stream_res.tg, true);
1043 		hwss_wait_for_blank_complete(pipe_ctx->stream_res.tg);
1044 		false_optc_underflow_wa(dc, pipe_ctx->stream, pipe_ctx->stream_res.tg);
1045 	}
1046 
1047 	/* VTG is within the DCHUB command block. DCFCLK is always on. */
1048 	if (false == pipe_ctx->stream_res.tg->funcs->enable_crtc(pipe_ctx->stream_res.tg)) {
1049 		BREAK_TO_DEBUGGER();
1050 		return DC_ERROR_UNEXPECTED;
1051 	}
1052 
1053 	/* TODO program crtc source select for non-virtual signal*/
1054 	/* TODO program FMT */
1055 	/* TODO setup link_enc */
1056 	/* TODO set stream attributes */
1057 	/* TODO program audio */
1058 	/* TODO enable stream if timing changed */
1059 	/* TODO unblank stream if DP */
1060 
1061 	return DC_OK;
1062 }
1063 
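/*
 * Tear down the back end for a pipe whose stream is being removed: turn off
 * DPMS/audio as needed and, for the top pipe only, disable the CRTC and OPTC
 * clock and clear DRR.
 */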
1064 static void dcn10_reset_back_end_for_pipe(
1065 		struct dc *dc,
1066 		struct pipe_ctx *pipe_ctx,
1067 		struct dc_state *context)
1068 {
1069 	int i;
1070 	struct dc_link *link;
1071 	DC_LOGGER_INIT(dc->ctx->logger);
1072 	if (pipe_ctx->stream_res.stream_enc == NULL) {
1073 		pipe_ctx->stream = NULL;
1074 		return;
1075 	}
1076 
1077 	link = pipe_ctx->stream->link;
1078 	/* DPMS may have already disabled the stream, or the
1079 	 * dpms_off status is incorrect due to the fastboot
1080 	 * feature. When the system resumes from S4 with second
1081 	 * screen only, dpms_off would be true but VBIOS has
1082 	 * lit up eDP, so check the link status too.
1083 	 */
1084 	if (!pipe_ctx->stream->dpms_off || link->link_status.link_active)
1085 		dc->link_srv->set_dpms_off(pipe_ctx);
1086 	else if (pipe_ctx->stream_res.audio)
1087 		dc->hwss.disable_audio_stream(pipe_ctx);
1088 
1089 	if (pipe_ctx->stream_res.audio) {
1090 		/*disable az_endpoint*/
1091 		pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio);
1092 
1093 		/*free audio*/
1094 		if (dc->caps.dynamic_audio == true) {
1095 			/* we have to dynamically arbitrate the audio endpoints */
1096 			/* we free the resource; is_audio_acquired needs to be reset */
1097 			update_audio_usage(&dc->current_state->res_ctx, dc->res_pool,
1098 					pipe_ctx->stream_res.audio, false);
1099 			pipe_ctx->stream_res.audio = NULL;
1100 		}
1101 	}
1102 
1103 	/* By the caller's loop, the parent pipe (pipe0) is reset last.
1104 	 * The back end is shared by all pipes and is only disabled when the
1105 	 * parent pipe is disabled.
1106 	 */
1107 	if (pipe_ctx->top_pipe == NULL) {
1108 
1109 		if (pipe_ctx->stream_res.abm)
1110 			dc->hwss.set_abm_immediate_disable(pipe_ctx);
1111 
1112 		pipe_ctx->stream_res.tg->funcs->disable_crtc(pipe_ctx->stream_res.tg);
1113 
1114 		pipe_ctx->stream_res.tg->funcs->enable_optc_clock(pipe_ctx->stream_res.tg, false);
1115 		if (pipe_ctx->stream_res.tg->funcs->set_drr)
1116 			pipe_ctx->stream_res.tg->funcs->set_drr(
1117 					pipe_ctx->stream_res.tg, NULL);
1118 		if (dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal))
1119 			pipe_ctx->stream->link->phy_state.symclk_ref_cnts.otg = 0;
1120 	}
1121 
1122 	for (i = 0; i < dc->res_pool->pipe_count; i++)
1123 		if (&dc->current_state->res_ctx.pipe_ctx[i] == pipe_ctx)
1124 			break;
1125 
1126 	if (i == dc->res_pool->pipe_count)
1127 		return;
1128 
1129 	pipe_ctx->stream = NULL;
1130 	DC_LOG_DEBUG("Reset back end for pipe %d, tg:%d\n",
1131 					pipe_ctx->pipe_idx, pipe_ctx->stream_res.tg->inst);
1132 }
1133 
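/*
 * Forced HUB recovery (only when debug.recovery_enabled): blank and disable
 * every HUBP around a DCHUBBUB global soft reset, following the register
 * sequence documented inside.
 */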
1134 static bool dcn10_hw_wa_force_recovery(struct dc *dc)
1135 {
1136 	struct hubp *hubp ;
1137 	unsigned int i;
1138 
1139 	if (!dc->debug.recovery_enabled)
1140 		return false;
1141 	/*
1142 	DCHUBP_CNTL:HUBP_BLANK_EN=1
1143 	DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=1
1144 	DCHUBP_CNTL:HUBP_DISABLE=1
1145 	DCHUBP_CNTL:HUBP_DISABLE=0
1146 	DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=0
1147 	DCSURF_PRIMARY_SURFACE_ADDRESS
1148 	DCHUBP_CNTL:HUBP_BLANK_EN=0
1149 	*/
1150 
1151 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
1152 		struct pipe_ctx *pipe_ctx =
1153 			&dc->current_state->res_ctx.pipe_ctx[i];
1154 		if (pipe_ctx != NULL) {
1155 			hubp = pipe_ctx->plane_res.hubp;
1156 			/*DCHUBP_CNTL:HUBP_BLANK_EN=1*/
1157 			if (hubp != NULL && hubp->funcs->set_hubp_blank_en)
1158 				hubp->funcs->set_hubp_blank_en(hubp, true);
1159 		}
1160 	}
1161 	/*DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=1*/
1162 	hubbub1_soft_reset(dc->res_pool->hubbub, true);
1163 
1164 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
1165 		struct pipe_ctx *pipe_ctx =
1166 			&dc->current_state->res_ctx.pipe_ctx[i];
1167 		if (pipe_ctx != NULL) {
1168 			hubp = pipe_ctx->plane_res.hubp;
1169 			/*DCHUBP_CNTL:HUBP_DISABLE=1*/
1170 			if (hubp != NULL && hubp->funcs->hubp_disable_control)
1171 				hubp->funcs->hubp_disable_control(hubp, true);
1172 		}
1173 	}
1174 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
1175 		struct pipe_ctx *pipe_ctx =
1176 			&dc->current_state->res_ctx.pipe_ctx[i];
1177 		if (pipe_ctx != NULL) {
1178 			hubp = pipe_ctx->plane_res.hubp;
1179 			/*DCHUBP_CNTL:HUBP_DISABLE=0*/
1180 			if (hubp != NULL && hubp->funcs->hubp_disable_control)
1181 				hubp->funcs->hubp_disable_control(hubp, true);
1182 		}
1183 	}
1184 	/*DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=0*/
1185 	hubbub1_soft_reset(dc->res_pool->hubbub, false);
1186 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
1187 		struct pipe_ctx *pipe_ctx =
1188 			&dc->current_state->res_ctx.pipe_ctx[i];
1189 		if (pipe_ctx != NULL) {
1190 			hubp = pipe_ctx->plane_res.hubp;
1191 			/*DCHUBP_CNTL:HUBP_BLANK_EN=0*/
1192 			if (hubp != NULL && hubp->funcs->set_hubp_blank_en)
1193 				hubp->funcs->set_hubp_blank_en(hubp, true);
1194 		}
1195 	}
1196 	return true;
1197 
1198 }
1199 
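/*
 * Sanity check that HUBBUB still honors the "allow p-state change" request;
 * if not, optionally log the HW state and attempt a forced recovery.
 */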
1200 void dcn10_verify_allow_pstate_change_high(struct dc *dc)
1201 {
1202 	struct hubbub *hubbub = dc->res_pool->hubbub;
1203 	static bool should_log_hw_state; /* prevent hw state log by default */
1204 
1205 	if (!hubbub->funcs->verify_allow_pstate_change_high)
1206 		return;
1207 
1208 	if (!hubbub->funcs->verify_allow_pstate_change_high(hubbub)) {
1209 		int i = 0;
1210 
1211 		if (should_log_hw_state)
1212 			dcn10_log_hw_state(dc, NULL);
1213 
1214 		TRACE_DC_PIPE_STATE(pipe_ctx, i, MAX_PIPES);
1215 		BREAK_TO_DEBUGGER();
1216 		if (dcn10_hw_wa_force_recovery(dc)) {
1217 			/*check again*/
1218 			if (!hubbub->funcs->verify_allow_pstate_change_high(hubbub))
1219 				BREAK_TO_DEBUGGER();
1220 		}
1221 	}
1222 }
1223 
1224 /* trigger HW to start disconnect plane from stream on the next vsync */
1225 void dcn10_plane_atomic_disconnect(struct dc *dc,
1226 		struct dc_state *state,
1227 		struct pipe_ctx *pipe_ctx)
1228 {
1229 	struct dce_hwseq *hws = dc->hwseq;
1230 	struct hubp *hubp = pipe_ctx->plane_res.hubp;
1231 	int dpp_id = pipe_ctx->plane_res.dpp->inst;
1232 	struct mpc *mpc = dc->res_pool->mpc;
1233 	struct mpc_tree *mpc_tree_params;
1234 	struct mpcc *mpcc_to_remove = NULL;
1235 	struct output_pixel_processor *opp = pipe_ctx->stream_res.opp;
1236 
1237 	mpc_tree_params = &(opp->mpc_tree_params);
1238 	mpcc_to_remove = mpc->funcs->get_mpcc_for_dpp(mpc_tree_params, dpp_id);
1239 
1240 	/*Already reset*/
1241 	if (mpcc_to_remove == NULL)
1242 		return;
1243 
1244 	mpc->funcs->remove_mpcc(mpc, mpc_tree_params, mpcc_to_remove);
1245 	// Phantom pipes have OTG disabled by default, so MPCC_STATUS will never assert idle,
1246 	// so don't wait for MPCC_IDLE in the programming sequence
1247 	if (dc_state_get_pipe_subvp_type(state, pipe_ctx) != SUBVP_PHANTOM)
1248 		opp->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true;
1249 
1250 	dc->optimized_required = true;
1251 
1252 	if (hubp->funcs->hubp_disconnect)
1253 		hubp->funcs->hubp_disconnect(hubp);
1254 
1255 	if (dc->debug.sanity_checks)
1256 		hws->funcs.verify_allow_pstate_change_high(dc);
1257 }
1258 
1259 /**
1260  * dcn10_plane_atomic_power_down - Power down plane components.
1261  *
1262  * @dc: dc struct reference, used to grab hwseq.
1263  * @dpp: dpp struct reference.
1264  * @hubp: hubp struct reference.
1265  *
1266  * Keep in mind that this operation requires a power gate configuration;
1267  * however, power gate requests are precisely controlled to avoid problems,
1268  * so the power gate request is normally kept disabled. This function first
1269  * enables the power gate request, then disables the DPP and HUBP, and
1270  * finally disables the power gate request again.
1271  */
1272 void dcn10_plane_atomic_power_down(struct dc *dc,
1273 		struct dpp *dpp,
1274 		struct hubp *hubp)
1275 {
1276 	struct dce_hwseq *hws = dc->hwseq;
1277 	DC_LOGGER_INIT(dc->ctx->logger);
1278 
1279 	if (REG(DC_IP_REQUEST_CNTL)) {
1280 		REG_SET(DC_IP_REQUEST_CNTL, 0,
1281 				IP_REQUEST_EN, 1);
1282 
1283 		if (hws->funcs.dpp_pg_control)
1284 			hws->funcs.dpp_pg_control(hws, dpp->inst, false);
1285 
1286 		if (hws->funcs.hubp_pg_control)
1287 			hws->funcs.hubp_pg_control(hws, hubp->inst, false);
1288 
1289 		hubp->funcs->hubp_reset(hubp);
1290 		dpp->funcs->dpp_reset(dpp);
1291 
1292 		REG_SET(DC_IP_REQUEST_CNTL, 0,
1293 				IP_REQUEST_EN, 0);
1294 		DC_LOG_DEBUG(
1295 				"Power gated front end %d\n", hubp->inst);
1296 	}
1297 
1298 	if (hws->funcs.dpp_root_clock_control)
1299 		hws->funcs.dpp_root_clock_control(hws, dpp->inst, false);
1300 }
1301 
1302 /* disable HW used by plane.
1303  * note:  cannot disable until disconnect is complete
1304  */
1305 void dcn10_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx)
1306 {
1307 	struct dce_hwseq *hws = dc->hwseq;
1308 	struct hubp *hubp = pipe_ctx->plane_res.hubp;
1309 	struct dpp *dpp = pipe_ctx->plane_res.dpp;
1310 	int opp_id = hubp->opp_id;
1311 
1312 	dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe_ctx);
1313 
1314 	hubp->funcs->hubp_clk_cntl(hubp, false);
1315 
1316 	dpp->funcs->dpp_dppclk_control(dpp, false, false);
1317 
1318 	if (opp_id != 0xf && pipe_ctx->stream_res.opp->mpc_tree_params.opp_list == NULL)
1319 		pipe_ctx->stream_res.opp->funcs->opp_pipe_clock_control(
1320 				pipe_ctx->stream_res.opp,
1321 				false);
1322 
1323 	hubp->power_gated = true;
1324 	dc->optimized_required = false; /* We're powering off, no need to optimize */
1325 
1326 	hws->funcs.plane_atomic_power_down(dc,
1327 			pipe_ctx->plane_res.dpp,
1328 			pipe_ctx->plane_res.hubp);
1329 
1330 	pipe_ctx->stream = NULL;
1331 	memset(&pipe_ctx->stream_res, 0, sizeof(pipe_ctx->stream_res));
1332 	memset(&pipe_ctx->plane_res, 0, sizeof(pipe_ctx->plane_res));
1333 	pipe_ctx->top_pipe = NULL;
1334 	pipe_ctx->bottom_pipe = NULL;
1335 	pipe_ctx->plane_state = NULL;
1336 }
1337 
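/*
 * Disable the plane's front end (unless it is already power gated) and apply
 * the DEGVIDCN10_253 stutter workaround when all HUBPs end up gated.
 */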
1338 void dcn10_disable_plane(struct dc *dc, struct dc_state *state, struct pipe_ctx *pipe_ctx)
1339 {
1340 	struct dce_hwseq *hws = dc->hwseq;
1341 	DC_LOGGER_INIT(dc->ctx->logger);
1342 
1343 	if (!pipe_ctx->plane_res.hubp || pipe_ctx->plane_res.hubp->power_gated)
1344 		return;
1345 
1346 	hws->funcs.plane_atomic_disable(dc, pipe_ctx);
1347 
1348 	apply_DEGVIDCN10_253_wa(dc);
1349 
1350 	DC_LOG_DC("Power down front end %d\n",
1351 					pipe_ctx->pipe_idx);
1352 }
1353 
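/*
 * Bring all pipes to a known state at init: blank enabled OTGs, reset DET
 * allocations and the MPC tree, disconnect and disable unused front ends,
 * and power gate any DSC that is not feeding the running OPTC.
 */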
1354 void dcn10_init_pipes(struct dc *dc, struct dc_state *context)
1355 {
1356 	int i;
1357 	struct dce_hwseq *hws = dc->hwseq;
1358 	struct hubbub *hubbub = dc->res_pool->hubbub;
1359 	bool can_apply_seamless_boot = false;
1360 	bool tg_enabled[MAX_PIPES] = {false};
1361 
1362 	for (i = 0; i < context->stream_count; i++) {
1363 		if (context->streams[i]->apply_seamless_boot_optimization) {
1364 			can_apply_seamless_boot = true;
1365 			break;
1366 		}
1367 	}
1368 
1369 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
1370 		struct timing_generator *tg = dc->res_pool->timing_generators[i];
1371 		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
1372 
1373 		/* We assume pipe_ctx does not map irregularly to a
1374 		 * non-preferred front end. If pipe_ctx->stream is not NULL,
1375 		 * we will use the pipe, so don't disable it.
1376 		 */
1377 		if (pipe_ctx->stream != NULL && can_apply_seamless_boot)
1378 			continue;
1379 
1380 		/* Blank controller using driver code instead of
1381 		 * command table.
1382 		 */
1383 		if (tg->funcs->is_tg_enabled(tg)) {
1384 			if (hws->funcs.init_blank != NULL) {
1385 				hws->funcs.init_blank(dc, tg);
1386 				tg->funcs->lock(tg);
1387 			} else {
1388 				tg->funcs->lock(tg);
1389 				tg->funcs->set_blank(tg, true);
1390 				hwss_wait_for_blank_complete(tg);
1391 			}
1392 		}
1393 	}
1394 
1395 	/* Reset det size */
1396 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
1397 		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
1398 		struct hubp *hubp = dc->res_pool->hubps[i];
1399 
1400 		/* Do not need to reset for seamless boot */
1401 		if (pipe_ctx->stream != NULL && can_apply_seamless_boot)
1402 			continue;
1403 
1404 		if (hubbub && hubp) {
1405 			if (hubbub->funcs->program_det_size)
1406 				hubbub->funcs->program_det_size(hubbub, hubp->inst, 0);
1407 			if (hubbub->funcs->program_det_segments)
1408 				hubbub->funcs->program_det_segments(hubbub, hubp->inst, 0);
1409 		}
1410 	}
1411 
1412 	/* num_opp will be equal to number of mpcc */
1413 	for (i = 0; i < dc->res_pool->res_cap->num_opp; i++) {
1414 		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
1415 
1416 		/* Cannot reset the MPC mux if seamless boot */
1417 		if (pipe_ctx->stream != NULL && can_apply_seamless_boot)
1418 			continue;
1419 
1420 		dc->res_pool->mpc->funcs->mpc_init_single_inst(
1421 				dc->res_pool->mpc, i);
1422 	}
1423 
1424 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
1425 		struct timing_generator *tg = dc->res_pool->timing_generators[i];
1426 		struct hubp *hubp = dc->res_pool->hubps[i];
1427 		struct dpp *dpp = dc->res_pool->dpps[i];
1428 		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
1429 
1430 		/* We assume pipe_ctx does not map irregularly to a
1431 		 * non-preferred front end. If pipe_ctx->stream is not NULL,
1432 		 * we will use the pipe, so don't disable it.
1433 		 */
1434 		if (can_apply_seamless_boot &&
1435 			pipe_ctx->stream != NULL &&
1436 			pipe_ctx->stream_res.tg->funcs->is_tg_enabled(
1437 				pipe_ctx->stream_res.tg)) {
1438 			// Enable double buffering for OTG_BLANK regardless of whether
1439 			// seamless boot is enabled, to suppress global sync signals
1440 			// when the OTG is blanked. This prevents the pipe from
1441 			// requesting data while in PSR.
1442 			tg->funcs->tg_init(tg);
1443 			hubp->power_gated = true;
1444 			tg_enabled[i] = true;
1445 			continue;
1446 		}
1447 
1448 		/* Disable on the current state so the new one isn't cleared. */
1449 		pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
1450 
1451 		hubp->funcs->hubp_reset(hubp);
1452 		dpp->funcs->dpp_reset(dpp);
1453 
1454 		pipe_ctx->stream_res.tg = tg;
1455 		pipe_ctx->pipe_idx = i;
1456 
1457 		pipe_ctx->plane_res.hubp = hubp;
1458 		pipe_ctx->plane_res.dpp = dpp;
1459 		pipe_ctx->plane_res.mpcc_inst = dpp->inst;
1460 		hubp->mpcc_id = dpp->inst;
1461 		hubp->opp_id = OPP_ID_INVALID;
1462 		hubp->power_gated = false;
1463 
1464 		dc->res_pool->opps[i]->mpc_tree_params.opp_id = dc->res_pool->opps[i]->inst;
1465 		dc->res_pool->opps[i]->mpc_tree_params.opp_list = NULL;
1466 		dc->res_pool->opps[i]->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true;
1467 		pipe_ctx->stream_res.opp = dc->res_pool->opps[i];
1468 
1469 		hws->funcs.plane_atomic_disconnect(dc, context, pipe_ctx);
1470 
1471 		if (tg->funcs->is_tg_enabled(tg))
1472 			tg->funcs->unlock(tg);
1473 
1474 		dc->hwss.disable_plane(dc, context, pipe_ctx);
1475 
1476 		pipe_ctx->stream_res.tg = NULL;
1477 		pipe_ctx->plane_res.hubp = NULL;
1478 
1479 		if (tg->funcs->is_tg_enabled(tg)) {
1480 			if (tg->funcs->init_odm)
1481 				tg->funcs->init_odm(tg);
1482 		}
1483 
1484 		tg->funcs->tg_init(tg);
1485 	}
1486 
1487 	/* Clean up MPC tree */
1488 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
1489 		if (tg_enabled[i]) {
1490 			if (dc->res_pool->opps[i]->mpc_tree_params.opp_list) {
1491 				if (dc->res_pool->opps[i]->mpc_tree_params.opp_list->mpcc_bot) {
1492 					int bot_id = dc->res_pool->opps[i]->mpc_tree_params.opp_list->mpcc_bot->mpcc_id;
1493 
1494 					if ((bot_id < MAX_MPCC) && (bot_id < MAX_PIPES) && (!tg_enabled[bot_id]))
1495 						dc->res_pool->opps[i]->mpc_tree_params.opp_list = NULL;
1496 				}
1497 			}
1498 		}
1499 	}
1500 
1501 	/* Power gate DSCs */
1502 	if (hws->funcs.dsc_pg_control != NULL) {
1503 		uint32_t num_opps = 0;
1504 		uint32_t opp_id_src0 = OPP_ID_INVALID;
1505 		uint32_t opp_id_src1 = OPP_ID_INVALID;
1506 
1507 		// Step 1: Find out which OPTC is running and has its DSC enabled.
1508 		// We can't use res_pool->res_cap->num_timing_generator to check,
1509 		// because it records the default display pipe setting built into the
1510 		// driver, not the display pipes of the current chip.
1511 		// Some ASICs are fused to fewer display pipes than the default setting.
1512 		// The dcnxx_resource_construct function obtains the real information.
1513 		for (i = 0; i < dc->res_pool->timing_generator_count; i++) {
1514 			uint32_t optc_dsc_state = 0;
1515 			struct timing_generator *tg = dc->res_pool->timing_generators[i];
1516 
1517 			if (tg->funcs->is_tg_enabled(tg)) {
1518 				if (tg->funcs->get_dsc_status)
1519 					tg->funcs->get_dsc_status(tg, &optc_dsc_state);
1520 				// Only one OPTC with DSC can be ON, so once we get a result we exit this block.
1521 				// A non-zero value means DSC is enabled.
1522 				if (optc_dsc_state != 0) {
1523 					tg->funcs->get_optc_source(tg, &num_opps, &opp_id_src0, &opp_id_src1);
1524 					break;
1525 				}
1526 			}
1527 		}
1528 
1529 		// Step 2: Power down each DSC, but skip the DSC of the running OPTC.
1530 		for (i = 0; i < dc->res_pool->res_cap->num_dsc; i++) {
1531 			struct dcn_dsc_state s  = {0};
1532 
1533 			dc->res_pool->dscs[i]->funcs->dsc_read_state(dc->res_pool->dscs[i], &s);
1534 
1535 			if ((s.dsc_opp_source == opp_id_src0 || s.dsc_opp_source == opp_id_src1) &&
1536 				s.dsc_clock_en && s.dsc_fw_en)
1537 				continue;
1538 
1539 			hws->funcs.dsc_pg_control(hws, dc->res_pool->dscs[i]->inst, false);
1540 		}
1541 	}
1542 }
1543 
1544 void dcn10_init_hw(struct dc *dc)
1545 {
1546 	int i;
1547 	struct abm *abm = dc->res_pool->abm;
1548 	struct dmcu *dmcu = dc->res_pool->dmcu;
1549 	struct dce_hwseq *hws = dc->hwseq;
1550 	struct dc_bios *dcb = dc->ctx->dc_bios;
1551 	struct resource_pool *res_pool = dc->res_pool;
1552 	uint32_t backlight = MAX_BACKLIGHT_LEVEL;
1553 	uint32_t user_level = MAX_BACKLIGHT_LEVEL;
1554 	bool   is_optimized_init_done = false;
1555 
1556 	if (dc->clk_mgr && dc->clk_mgr->funcs->init_clocks)
1557 		dc->clk_mgr->funcs->init_clocks(dc->clk_mgr);
1558 
1559 	/* Align bw context with hw config when system resume. */
1560 	if (dc->clk_mgr && dc->clk_mgr->clks.dispclk_khz != 0 && dc->clk_mgr->clks.dppclk_khz != 0) {
1561 		dc->current_state->bw_ctx.bw.dcn.clk.dispclk_khz = dc->clk_mgr->clks.dispclk_khz;
1562 		dc->current_state->bw_ctx.bw.dcn.clk.dppclk_khz = dc->clk_mgr->clks.dppclk_khz;
1563 	}
1564 
1565 	// Initialize the dccg
1566 	if (dc->res_pool->dccg && dc->res_pool->dccg->funcs->dccg_init)
1567 		dc->res_pool->dccg->funcs->dccg_init(res_pool->dccg);
1568 
1569 	if (!dcb->funcs->is_accelerated_mode(dcb))
1570 		hws->funcs.disable_vga(dc->hwseq);
1571 
1572 	if (!dc_dmub_srv_optimized_init_done(dc->ctx->dmub_srv))
1573 		hws->funcs.bios_golden_init(dc);
1574 
1575 
1576 	if (dc->ctx->dc_bios->fw_info_valid) {
1577 		res_pool->ref_clocks.xtalin_clock_inKhz =
1578 				dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency;
1579 
1580 		if (res_pool->dccg && res_pool->hubbub) {
1581 
1582 			(res_pool->dccg->funcs->get_dccg_ref_freq)(res_pool->dccg,
1583 					dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency,
1584 					&res_pool->ref_clocks.dccg_ref_clock_inKhz);
1585 
1586 			(res_pool->hubbub->funcs->get_dchub_ref_freq)(res_pool->hubbub,
1587 					res_pool->ref_clocks.dccg_ref_clock_inKhz,
1588 					&res_pool->ref_clocks.dchub_ref_clock_inKhz);
1589 		} else {
1590 			// Not all ASICs have DCCG sw component
1591 			res_pool->ref_clocks.dccg_ref_clock_inKhz =
1592 					res_pool->ref_clocks.xtalin_clock_inKhz;
1593 			res_pool->ref_clocks.dchub_ref_clock_inKhz =
1594 					res_pool->ref_clocks.xtalin_clock_inKhz;
1595 		}
1596 	} else
1597 		ASSERT_CRITICAL(false);
1598 
1599 	for (i = 0; i < dc->link_count; i++) {
1600 		/* Power up AND update implementation according to the
1601 		 * required signal (which may be different from the
1602 		 * default signal on connector).
1603 		 */
1604 		struct dc_link *link = dc->links[i];
1605 
1606 		if (!is_optimized_init_done)
1607 			link->link_enc->funcs->hw_init(link->link_enc);
1608 
1609 		/* Check for enabled DIG to identify enabled display */
1610 		if (link->link_enc->funcs->is_dig_enabled &&
1611 			link->link_enc->funcs->is_dig_enabled(link->link_enc)) {
1612 			link->link_status.link_active = true;
1613 			if (link->link_enc->funcs->fec_is_active &&
1614 					link->link_enc->funcs->fec_is_active(link->link_enc))
1615 				link->fec_state = dc_link_fec_enabled;
1616 		}
1617 	}
1618 
1619 	/* we want to turn off all dp displays before doing detection */
1620 	dc->link_srv->blank_all_dp_displays(dc);
1621 
1622 	if (hws->funcs.enable_power_gating_plane)
1623 		hws->funcs.enable_power_gating_plane(dc->hwseq, true);
1624 
1625 	/* If taking control over from VBIOS, we may want to optimize our first
1626 	 * mode set, so we need to skip powering down pipes until we know which
1627 	 * pipes we want to use.
1628 	 * Otherwise, if taking control is not possible, we need to power
1629 	 * everything down.
1630 	 */
1631 	if (dcb->funcs->is_accelerated_mode(dcb) || !dc->config.seamless_boot_edp_requested) {
1632 		if (!is_optimized_init_done) {
1633 			hws->funcs.init_pipes(dc, dc->current_state);
1634 			if (dc->res_pool->hubbub->funcs->allow_self_refresh_control)
1635 				dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub,
1636 						!dc->res_pool->hubbub->ctx->dc->debug.disable_stutter);
1637 		}
1638 	}
1639 
1640 	if (!is_optimized_init_done) {
1641 
1642 		for (i = 0; i < res_pool->audio_count; i++) {
1643 			struct audio *audio = res_pool->audios[i];
1644 
1645 			audio->funcs->hw_init(audio);
1646 		}
1647 
1648 		for (i = 0; i < dc->link_count; i++) {
1649 			struct dc_link *link = dc->links[i];
1650 
1651 			if (link->panel_cntl) {
1652 				backlight = link->panel_cntl->funcs->hw_init(link->panel_cntl);
1653 				user_level = link->panel_cntl->stored_backlight_registers.USER_LEVEL;
1654 			}
1655 		}
1656 
1657 		if (abm != NULL)
1658 			abm->funcs->abm_init(abm, backlight, user_level);
1659 
1660 		if (dmcu != NULL && !dmcu->auto_load_dmcu)
1661 			dmcu->funcs->dmcu_init(dmcu);
1662 	}
1663 
1664 	if (abm != NULL && dmcu != NULL)
1665 		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
1666 
1667 	/* Power AFMT HDMI memory. TODO: may move to output disable/enable to save power */
1668 	if (!is_optimized_init_done)
1669 		REG_WRITE(DIO_MEM_PWR_CTRL, 0);
1670 
1671 	if (!dc->debug.disable_clock_gate) {
1672 		/* enable all DCN clock gating */
1673 		REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0);
1674 
1675 		REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0);
1676 
1677 		REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);
1678 	}
1679 
1680 	if (dc->clk_mgr && dc->clk_mgr->funcs->notify_wm_ranges)
1681 		dc->clk_mgr->funcs->notify_wm_ranges(dc->clk_mgr);
1682 }
1683 
1684 /* In headless boot cases, DIG may be turned on,
1685  * which causes HW/SW discrepancies.
1686  * To avoid this, power down the hardware on boot
1687  * if DIG is turned on.
1688  */
1689 void dcn10_power_down_on_boot(struct dc *dc)
1690 {
1691 	struct dc_link *edp_links[MAX_NUM_EDP];
1692 	struct dc_link *edp_link = NULL;
1693 	int edp_num;
1694 	int i = 0;
1695 
1696 	dc_get_edp_links(dc, edp_links, &edp_num);
1697 	if (edp_num)
1698 		edp_link = edp_links[0];
1699 
1700 	if (edp_link && edp_link->link_enc->funcs->is_dig_enabled &&
1701 			edp_link->link_enc->funcs->is_dig_enabled(edp_link->link_enc) &&
1702 			dc->hwseq->funcs.edp_backlight_control &&
1703 			dc->hwseq->funcs.power_down &&
1704 			dc->hwss.edp_power_control) {
1705 		dc->hwseq->funcs.edp_backlight_control(edp_link, false);
1706 		dc->hwseq->funcs.power_down(dc);
1707 		dc->hwss.edp_power_control(edp_link, false);
1708 	} else {
1709 		for (i = 0; i < dc->link_count; i++) {
1710 			struct dc_link *link = dc->links[i];
1711 
1712 			if (link->link_enc && link->link_enc->funcs->is_dig_enabled &&
1713 					link->link_enc->funcs->is_dig_enabled(link->link_enc) &&
1714 					dc->hwseq->funcs.power_down) {
1715 				dc->hwseq->funcs.power_down(dc);
1716 				break;
1717 			}
1718 
1719 		}
1720 	}
1721 
1722 	/*
1723 	 * Call update_clocks with empty context
1724 	 * to send DISPLAY_OFF
1725 	 * Otherwise DISPLAY_OFF may not be asserted
1726 	 */
1727 	if (dc->clk_mgr->funcs->set_low_power_state)
1728 		dc->clk_mgr->funcs->set_low_power_state(dc->clk_mgr);
1729 }
1730 
1731 void dcn10_reset_hw_ctx_wrap(
1732 		struct dc *dc,
1733 		struct dc_state *context)
1734 {
1735 	int i;
1736 	struct dce_hwseq *hws = dc->hwseq;
1737 
1738 	/* Reset Back End*/
1739 	for (i = dc->res_pool->pipe_count - 1; i >= 0 ; i--) {
1740 		struct pipe_ctx *pipe_ctx_old =
1741 			&dc->current_state->res_ctx.pipe_ctx[i];
1742 		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
1743 
1744 		if (!pipe_ctx_old->stream)
1745 			continue;
1746 
1747 		if (pipe_ctx_old->top_pipe)
1748 			continue;
1749 
1750 		if (!pipe_ctx->stream ||
1751 				pipe_need_reprogram(pipe_ctx_old, pipe_ctx)) {
1752 			struct clock_source *old_clk = pipe_ctx_old->clock_source;
1753 
1754 			dcn10_reset_back_end_for_pipe(dc, pipe_ctx_old, dc->current_state);
1755 			if (hws->funcs.enable_stream_gating)
1756 				hws->funcs.enable_stream_gating(dc, pipe_ctx_old);
1757 			if (old_clk)
1758 				old_clk->funcs->cs_power_down(old_clk);
1759 		}
1760 	}
1761 }
1762 
1763 static bool patch_address_for_sbs_tb_stereo(
1764 		struct pipe_ctx *pipe_ctx, PHYSICAL_ADDRESS_LOC *addr)
1765 {
1766 	struct dc_plane_state *plane_state = pipe_ctx->plane_state;
1767 	bool sec_split = pipe_ctx->top_pipe &&
1768 			pipe_ctx->top_pipe->plane_state == pipe_ctx->plane_state;
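	/* For side-by-side / top-and-bottom stereo, the secondary split pipe is
	 * presumably meant to scan out the right-eye half, so the right address
	 * is temporarily swapped into left_addr below and the caller restores
	 * the original afterwards (see dcn10_update_plane_addr).
	 */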
1769 	if (sec_split && plane_state->address.type == PLN_ADDR_TYPE_GRPH_STEREO &&
1770 		(pipe_ctx->stream->timing.timing_3d_format ==
1771 		 TIMING_3D_FORMAT_SIDE_BY_SIDE ||
1772 		 pipe_ctx->stream->timing.timing_3d_format ==
1773 		 TIMING_3D_FORMAT_TOP_AND_BOTTOM)) {
1774 		*addr = plane_state->address.grph_stereo.left_addr;
1775 		plane_state->address.grph_stereo.left_addr =
1776 		plane_state->address.grph_stereo.right_addr;
1777 		return true;
1778 	} else {
1779 		if (pipe_ctx->stream->view_format != VIEW_3D_FORMAT_NONE &&
1780 			plane_state->address.type != PLN_ADDR_TYPE_GRPH_STEREO) {
1781 			plane_state->address.type = PLN_ADDR_TYPE_GRPH_STEREO;
1782 			plane_state->address.grph_stereo.right_addr =
1783 			plane_state->address.grph_stereo.left_addr;
1784 			plane_state->address.grph_stereo.right_meta_addr =
1785 			plane_state->address.grph_stereo.left_meta_addr;
1786 		}
1787 	}
1788 	return false;
1789 }
1790 
1791 void dcn10_update_plane_addr(const struct dc *dc, struct pipe_ctx *pipe_ctx)
1792 {
1793 	bool addr_patched = false;
1794 	PHYSICAL_ADDRESS_LOC addr;
1795 	struct dc_plane_state *plane_state = pipe_ctx->plane_state;
1796 
1797 	if (plane_state == NULL)
1798 		return;
1799 
1800 	addr_patched = patch_address_for_sbs_tb_stereo(pipe_ctx, &addr);
1801 
1802 	pipe_ctx->plane_res.hubp->funcs->hubp_program_surface_flip_and_addr(
1803 			pipe_ctx->plane_res.hubp,
1804 			&plane_state->address,
1805 			plane_state->flip_immediate);
1806 
1807 	plane_state->status.requested_address = plane_state->address;
1808 
1809 	if (plane_state->flip_immediate)
1810 		plane_state->status.current_address = plane_state->address;
1811 
1812 	if (addr_patched)
1813 		pipe_ctx->plane_state->address.grph_stereo.left_addr = addr;
1814 }
1815 
1816 bool dcn10_set_input_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
1817 			const struct dc_plane_state *plane_state)
1818 {
1819 	struct dpp *dpp_base = pipe_ctx->plane_res.dpp;
1820 	const struct dc_transfer_func *tf = NULL;
1821 	bool result = true;
1822 
1823 	if (dpp_base == NULL)
1824 		return false;
1825 
1826 	tf = &plane_state->in_transfer_func;
1827 
1828 	if (!dpp_base->ctx->dc->debug.always_use_regamma
1829 		&& !plane_state->gamma_correction.is_identity
1830 			&& dce_use_lut(plane_state->format))
1831 		dpp_base->funcs->dpp_program_input_lut(dpp_base, &plane_state->gamma_correction);
1832 
1833 	if (tf->type == TF_TYPE_PREDEFINED) {
1834 		switch (tf->tf) {
1835 		case TRANSFER_FUNCTION_SRGB:
1836 			dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_HW_sRGB);
1837 			break;
1838 		case TRANSFER_FUNCTION_BT709:
1839 			dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_HW_xvYCC);
1840 			break;
1841 		case TRANSFER_FUNCTION_LINEAR:
1842 			dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_BYPASS);
1843 			break;
1844 		case TRANSFER_FUNCTION_PQ:
1845 			dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_USER_PWL);
1846 			cm_helper_translate_curve_to_degamma_hw_format(tf, &dpp_base->degamma_params);
1847 			dpp_base->funcs->dpp_program_degamma_pwl(dpp_base, &dpp_base->degamma_params);
1848 			result = true;
1849 			break;
1850 		default:
1851 			result = false;
1852 			break;
1853 		}
1854 	} else if (tf->type == TF_TYPE_BYPASS) {
1855 		dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_BYPASS);
1856 	} else {
1857 		cm_helper_translate_curve_to_degamma_hw_format(tf,
1858 					&dpp_base->degamma_params);
1859 		dpp_base->funcs->dpp_program_degamma_pwl(dpp_base,
1860 				&dpp_base->degamma_params);
1861 		result = true;
1862 	}
1863 
1864 	return result;
1865 }
1866 
1867 #define MAX_NUM_HW_POINTS 0x200
1868 
1869 static void log_tf(struct dc_context *ctx,
1870 				const struct dc_transfer_func *tf, uint32_t hw_points_num)
1871 {
1872 	// DC_LOG_GAMMA is default logging of all hw points
1873 	// DC_LOG_ALL_GAMMA logs all points, not only hw points
1874 	// DC_LOG_ALL_TF_CHANNELS logs all channels of the tf
1875 	int i = 0;
1876 
1877 	DC_LOG_GAMMA("Gamma Correction TF");
1878 	DC_LOG_ALL_GAMMA("Logging all tf points...");
1879 	DC_LOG_ALL_TF_CHANNELS("Logging all channels...");
1880 
1881 	for (i = 0; i < hw_points_num; i++) {
1882 		DC_LOG_GAMMA("R\t%d\t%llu", i, tf->tf_pts.red[i].value);
1883 		DC_LOG_ALL_TF_CHANNELS("G\t%d\t%llu", i, tf->tf_pts.green[i].value);
1884 		DC_LOG_ALL_TF_CHANNELS("B\t%d\t%llu", i, tf->tf_pts.blue[i].value);
1885 	}
1886 
1887 	for (i = hw_points_num; i < MAX_NUM_HW_POINTS; i++) {
1888 		DC_LOG_ALL_GAMMA("R\t%d\t%llu", i, tf->tf_pts.red[i].value);
1889 		DC_LOG_ALL_TF_CHANNELS("G\t%d\t%llu", i, tf->tf_pts.green[i].value);
1890 		DC_LOG_ALL_TF_CHANNELS("B\t%d\t%llu", i, tf->tf_pts.blue[i].value);
1891 	}
1892 }
1893 
1894 bool dcn10_set_output_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
1895 				const struct dc_stream_state *stream)
1896 {
1897 	struct dpp *dpp = pipe_ctx->plane_res.dpp;
1898 
1899 	if (!stream)
1900 		return false;
1901 
1902 	if (dpp == NULL)
1903 		return false;
1904 
1905 	dpp->regamma_params.hw_points_num = GAMMA_HW_POINTS_NUM;
1906 
1907 	if (stream->out_transfer_func.type == TF_TYPE_PREDEFINED &&
1908 	    stream->out_transfer_func.tf == TRANSFER_FUNCTION_SRGB)
1909 		dpp->funcs->dpp_program_regamma_pwl(dpp, NULL, OPP_REGAMMA_SRGB);
1910 
1911 	/* dcn10_translate_regamma_to_hw_format takes 750us, so only do it on a
1912 	 * full update.
1913 	 */
1914 	else if (cm_helper_translate_curve_to_hw_format(dc->ctx,
1915 			&stream->out_transfer_func,
1916 			&dpp->regamma_params, false)) {
1917 		dpp->funcs->dpp_program_regamma_pwl(
1918 				dpp,
1919 				&dpp->regamma_params, OPP_REGAMMA_USER);
1920 	} else
1921 		dpp->funcs->dpp_program_regamma_pwl(dpp, NULL, OPP_REGAMMA_BYPASS);
1922 
1923 	if (stream->ctx) {
1924 		log_tf(stream->ctx,
1925 				&stream->out_transfer_func,
1926 				dpp->regamma_params.hw_points_num);
1927 	}
1928 
1929 	return true;
1930 }
1931 
1932 void dcn10_pipe_control_lock(
1933 	struct dc *dc,
1934 	struct pipe_ctx *pipe,
1935 	bool lock)
1936 {
1937 	struct dce_hwseq *hws = dc->hwseq;
1938 
1939 	/* Use the TG master update lock to lock everything on the TG,
1940 	 * therefore only the top pipe needs to lock.
1941 	 */
1942 	if (!pipe || pipe->top_pipe)
1943 		return;
1944 
1945 	if (dc->debug.sanity_checks)
1946 		hws->funcs.verify_allow_pstate_change_high(dc);
1947 
1948 	if (lock)
1949 		pipe->stream_res.tg->funcs->lock(pipe->stream_res.tg);
1950 	else
1951 		pipe->stream_res.tg->funcs->unlock(pipe->stream_res.tg);
1952 
1953 	if (dc->debug.sanity_checks)
1954 		hws->funcs.verify_allow_pstate_change_high(dc);
1955 }
1956 
1957 /**
1958  * delay_cursor_until_vupdate() - Delay cursor update if too close to VUPDATE.
1959  *
1960  * Software keepout workaround to prevent cursor update locking from stalling
1961  * out cursor updates indefinitely, and to prevent old values from being
1962  * retained when the viewport changes in the same frame as the cursor.
1963  *
1964  * The idea is to calculate the remaining time from VPOS to VUPDATE. If it's
1965  * too close to VUPDATE, then stall out until VUPDATE finishes.
1966  *
1967  * TODO: Optimize cursor programming to be once per frame before VUPDATE
1968  *       to avoid the need for this workaround.
1969  *
1970  * @dc: Current DC state
1971  * @pipe_ctx: Pipe_ctx pointer for delayed cursor update
1972  *
1973  * Return: void
1974  */
1975 static void delay_cursor_until_vupdate(struct dc *dc, struct pipe_ctx *pipe_ctx)
1976 {
1977 	struct dc_stream_state *stream = pipe_ctx->stream;
1978 	struct crtc_position position;
1979 	uint32_t vupdate_start, vupdate_end;
1980 	unsigned int lines_to_vupdate, us_to_vupdate, vpos;
1981 	unsigned int us_per_line, us_vupdate;
1982 
1983 	if (!dc->hwss.calc_vupdate_position || !dc->hwss.get_position)
1984 		return;
1985 
1986 	if (!pipe_ctx->stream_res.stream_enc || !pipe_ctx->stream_res.tg)
1987 		return;
1988 
1989 	dc->hwss.calc_vupdate_position(dc, pipe_ctx, &vupdate_start,
1990 				       &vupdate_end);
1991 
1992 	dc->hwss.get_position(&pipe_ctx, 1, &position);
1993 	vpos = position.vertical_count;
1994 
1995 	if (vpos <= vupdate_start) {
1996 		/* VPOS is in VACTIVE or back porch. */
1997 		lines_to_vupdate = vupdate_start - vpos;
1998 	} else {
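		/* VPOS has already passed vupdate_start this frame, so wrap
		 * around v_total to the next frame's VUPDATE. */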
1999 		lines_to_vupdate = stream->timing.v_total - vpos + vupdate_start;
2000 	}
2001 
2002 	/* Calculate time until VUPDATE in microseconds. */
2003 	us_per_line =
2004 		stream->timing.h_total * 10000u / stream->timing.pix_clk_100hz;
2005 	us_to_vupdate = lines_to_vupdate * us_per_line;
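	/* Illustrative numbers only: with h_total = 2200 and pix_clk_100hz =
	 * 1485000 (148.5 MHz), us_per_line = 2200 * 10000 / 1485000 = 14 us in
	 * integer math, so e.g. 10 lines to VUPDATE is roughly 140 us.
	 */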
2006 
2007 	/* Stall out until the cursor update completes. */
2008 	if (vupdate_end < vupdate_start)
2009 		vupdate_end += stream->timing.v_total;
2010 
2011 	/* Position is within the range of vupdate start and end */
2012 	if (lines_to_vupdate > stream->timing.v_total - vupdate_end + vupdate_start)
2013 		us_to_vupdate = 0;
2014 
2015 	/* 70 us is a conservative estimate of the cursor update time */
2016 	if (us_to_vupdate > 70)
2017 		return;
2018 
2019 	us_vupdate = (vupdate_end - vupdate_start + 1) * us_per_line;
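	/* Sleep through the remaining time to VUPDATE plus the VUPDATE window
	 * itself, so the cursor lock taken afterwards lands past the update.
	 */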
2020 	udelay(us_to_vupdate + us_vupdate);
2021 }
2022 
2023 void dcn10_cursor_lock(struct dc *dc, struct pipe_ctx *pipe, bool lock)
2024 {
2025 	/* cursor lock is per MPCC tree, so only need to lock one pipe per stream */
2026 	if (!pipe || pipe->top_pipe)
2027 		return;
2028 
2029 	/* Prevent cursor lock from stalling out cursor updates. */
2030 	if (lock)
2031 		delay_cursor_until_vupdate(dc, pipe);
2032 
2033 	if (pipe->stream && should_use_dmub_lock(pipe->stream->link)) {
2034 		union dmub_hw_lock_flags hw_locks = { 0 };
2035 		struct dmub_hw_lock_inst_flags inst_flags = { 0 };
2036 
2037 		hw_locks.bits.lock_cursor = 1;
2038 		inst_flags.opp_inst = pipe->stream_res.opp->inst;
2039 
2040 		dmub_hw_lock_mgr_cmd(dc->ctx->dmub_srv,
2041 					lock,
2042 					&hw_locks,
2043 					&inst_flags);
2044 	} else
2045 		dc->res_pool->mpc->funcs->cursor_lock(dc->res_pool->mpc,
2046 				pipe->stream_res.opp->inst, lock);
2047 }
2048 
2049 static bool wait_for_reset_trigger_to_occur(
2050 	struct dc_context *dc_ctx,
2051 	struct timing_generator *tg)
2052 {
2053 	bool rc = false;
2054 
2055 	DC_LOGGER_INIT(dc_ctx->logger);
2056 
2057 	/* To avoid an endless loop, we wait at most
2058 	 * frames_to_wait_on_triggered_reset frames for the reset to occur. */
2059 	const uint32_t frames_to_wait_on_triggered_reset = 10;
2060 	int i;
2061 
2062 	for (i = 0; i < frames_to_wait_on_triggered_reset; i++) {
2063 
2064 		if (!tg->funcs->is_counter_moving(tg)) {
2065 			DC_ERROR("TG counter is not moving!\n");
2066 			break;
2067 		}
2068 
2069 		if (tg->funcs->did_triggered_reset_occur(tg)) {
2070 			rc = true;
2071 			/* usually occurs at i=1 */
2072 			DC_SYNC_INFO("GSL: reset occurred at wait count: %d\n",
2073 					i);
2074 			break;
2075 		}
2076 
2077 		/* Wait for one frame. */
2078 		tg->funcs->wait_for_state(tg, CRTC_STATE_VACTIVE);
2079 		tg->funcs->wait_for_state(tg, CRTC_STATE_VBLANK);
2080 	}
2081 
2082 	if (false == rc)
2083 		DC_ERROR("GSL: Timeout on reset trigger!\n");
2084 
2085 	return rc;
2086 }
2087 
2088 static uint64_t reduceSizeAndFraction(uint64_t *numerator,
2089 				      uint64_t *denominator,
2090 				      bool checkUint32Bounary)
2091 {
2092 	int i;
2093 	bool ret = checkUint32Bounary == false;
2094 	uint64_t max_int32 = 0xffffffff;
2095 	uint64_t num, denom;
2096 	static const uint16_t prime_numbers[] = {
2097 		2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43,
2098 		47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97, 101, 103,
2099 		107, 109, 113, 127, 131, 137, 139, 149, 151, 157, 163,
2100 		167, 173, 179, 181, 191, 193, 197, 199, 211, 223, 227,
2101 		229, 233, 239, 241, 251, 257, 263, 269, 271, 277, 281,
2102 		283, 293, 307, 311, 313, 317, 331, 337, 347, 349, 353,
2103 		359, 367, 373, 379, 383, 389, 397, 401, 409, 419, 421,
2104 		431, 433, 439, 443, 449, 457, 461, 463, 467, 479, 487,
2105 		491, 499, 503, 509, 521, 523, 541, 547, 557, 563, 569,
2106 		571, 577, 587, 593, 599, 601, 607, 613, 617, 619, 631,
2107 		641, 643, 647, 653, 659, 661, 673, 677, 683, 691, 701,
2108 		709, 719, 727, 733, 739, 743, 751, 757, 761, 769, 773,
2109 		787, 797, 809, 811, 821, 823, 827, 829, 839, 853, 857,
2110 		859, 863, 877, 881, 883, 887, 907, 911, 919, 929, 937,
2111 		941, 947, 953, 967, 971, 977, 983, 991, 997};
2112 	int count = ARRAY_SIZE(prime_numbers);
2113 
2114 	num = *numerator;
2115 	denom = *denominator;
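	/* Divide out small prime factors shared by numerator and denominator,
	 * e.g. 84/36 -> 42/18 -> 21/9 (factor 2) -> 7/3 (factor 3). When
	 * checkUint32Bounary is set, stop early once both values fit in 32 bits.
	 */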
2116 	for (i = 0; i < count; i++) {
2117 		uint32_t num_remainder, denom_remainder;
2118 		uint64_t num_result, denom_result;
2119 		if (checkUint32Bounary &&
2120 			num <= max_int32 && denom <= max_int32) {
2121 			ret = true;
2122 			break;
2123 		}
2124 		do {
2125 			num_result = div_u64_rem(num, prime_numbers[i], &num_remainder);
2126 			denom_result = div_u64_rem(denom, prime_numbers[i], &denom_remainder);
2127 			if (num_remainder == 0 && denom_remainder == 0) {
2128 				num = num_result;
2129 				denom = denom_result;
2130 			}
2131 		} while (num_remainder == 0 && denom_remainder == 0);
2132 	}
2133 	*numerator = num;
2134 	*denominator = denom;
2135 	return ret;
2136 }
2137 
2138 static bool is_low_refresh_rate(struct pipe_ctx *pipe)
2139 {
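	/* refresh rate = pixel clock / (h_total * v_total); e.g. a 148.5 MHz
	 * clock with a 2200 x 1125 total timing gives 148500000 / 2475000 =
	 * 60 Hz, which is not considered low. (Illustrative numbers only.)
	 */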
2140 	uint32_t master_pipe_refresh_rate =
2141 		pipe->stream->timing.pix_clk_100hz * 100 /
2142 		pipe->stream->timing.h_total /
2143 		pipe->stream->timing.v_total;
2144 	return master_pipe_refresh_rate <= 30;
2145 }
2146 
2147 static uint8_t get_clock_divider(struct pipe_ctx *pipe,
2148 				 bool account_low_refresh_rate)
2149 {
2150 	uint32_t clock_divider = 1;
2151 	uint32_t numpipes = 1;
2152 
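	/* The divider accumulates how much lower the per-pipe/DTO clock runs than
	 * the stream's nominal pixel clock: x2 for YCbCr 4:2:0, xN for N
	 * ODM-combined pipes, and optionally x2 for low refresh rates. E.g. a
	 * 4:2:0 stream split across two ODM pipes yields a divider of 4.
	 * (Interpretation inferred from the callers in this file.)
	 */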
2153 	if (account_low_refresh_rate && is_low_refresh_rate(pipe))
2154 		clock_divider *= 2;
2155 
2156 	if (pipe->stream_res.pix_clk_params.pixel_encoding == PIXEL_ENCODING_YCBCR420)
2157 		clock_divider *= 2;
2158 
2159 	while (pipe->next_odm_pipe) {
2160 		pipe = pipe->next_odm_pipe;
2161 		numpipes++;
2162 	}
2163 	clock_divider *= numpipes;
2164 
2165 	return clock_divider;
2166 }
2167 
2168 static int dcn10_align_pixel_clocks(struct dc *dc, int group_size,
2169 				    struct pipe_ctx *grouped_pipes[])
2170 {
2171 	struct dc_context *dc_ctx = dc->ctx;
2172 	int i, master = -1, embedded = -1;
2173 	struct dc_crtc_timing *hw_crtc_timing;
2174 	uint64_t phase[MAX_PIPES];
2175 	uint64_t modulo[MAX_PIPES];
2176 	unsigned int pclk = 0;
2177 
2178 	uint32_t embedded_pix_clk_100hz;
2179 	uint16_t embedded_h_total;
2180 	uint16_t embedded_v_total;
2181 	uint32_t dp_ref_clk_100hz =
2182 		dc->res_pool->dp_clock_source->ctx->dc->clk_mgr->dprefclk_khz*10;
2183 
2184 	DC_LOGGER_INIT(dc_ctx->logger);
2185 
2186 	hw_crtc_timing = kcalloc(MAX_PIPES, sizeof(*hw_crtc_timing), GFP_KERNEL);
2187 	if (!hw_crtc_timing)
2188 		return master;
2189 
2190 	if (dc->config.vblank_alignment_dto_params &&
2191 		dc->res_pool->dp_clock_source->funcs->override_dp_pix_clk) {
2192 		embedded_h_total =
2193 			(dc->config.vblank_alignment_dto_params >> 32) & 0x7FFF;
2194 		embedded_v_total =
2195 			(dc->config.vblank_alignment_dto_params >> 48) & 0x7FFF;
2196 		embedded_pix_clk_100hz =
2197 			dc->config.vblank_alignment_dto_params & 0xFFFFFFFF;
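		/* vblank_alignment_dto_params packs the embedded panel timing as
		 * decoded above: bits [31:0] pixel clock in 100 Hz units,
		 * bits [46:32] h_total, bits [62:48] v_total.
		 */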
2198 
2199 		for (i = 0; i < group_size; i++) {
2200 			grouped_pipes[i]->stream_res.tg->funcs->get_hw_timing(
2201 					grouped_pipes[i]->stream_res.tg,
2202 					&hw_crtc_timing[i]);
2203 			dc->res_pool->dp_clock_source->funcs->get_pixel_clk_frequency_100hz(
2204 				dc->res_pool->dp_clock_source,
2205 				grouped_pipes[i]->stream_res.tg->inst,
2206 				&pclk);
2207 			hw_crtc_timing[i].pix_clk_100hz = pclk;
2208 			if (dc_is_embedded_signal(
2209 					grouped_pipes[i]->stream->signal)) {
2210 				embedded = i;
2211 				master = i;
2212 				phase[i] = embedded_pix_clk_100hz*(uint64_t)100;
2213 				modulo[i] = dp_ref_clk_100hz*100;
2214 			} else {
2215 
2216 				phase[i] = (uint64_t)embedded_pix_clk_100hz*
2217 					hw_crtc_timing[i].h_total*
2218 					hw_crtc_timing[i].v_total;
2219 				phase[i] = div_u64(phase[i], get_clock_divider(grouped_pipes[i], true));
2220 				modulo[i] = (uint64_t)dp_ref_clk_100hz*
2221 					embedded_h_total*
2222 					embedded_v_total;
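				/* Assuming the DP DTO output is dp_ref_clk * phase / modulo
				 * (as in the embedded case above, where the ratio reduces to
				 * pix_clk / dp_ref_clk), this sets the non-embedded pixel
				 * clock to embedded_pix_clk * (h_total * v_total) /
				 * (embedded_h_total * embedded_v_total) / divider, i.e. its
				 * frame rate is matched to the embedded panel's.
				 */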
2223 
2224 				if (reduceSizeAndFraction(&phase[i],
2225 						&modulo[i], true) == false) {
2226 					/*
2227 					 * This stops this timing from being
2228 					 * reported as synchronizable.
2229 					 */
2230 					DC_SYNC_INFO("Failed to reduce DTO parameters\n");
2231 					grouped_pipes[i]->stream->has_non_synchronizable_pclk = true;
2232 				}
2233 			}
2234 		}
2235 
2236 		for (i = 0; i < group_size; i++) {
2237 			if (i != embedded && !grouped_pipes[i]->stream->has_non_synchronizable_pclk) {
2238 				dc->res_pool->dp_clock_source->funcs->override_dp_pix_clk(
2239 					dc->res_pool->dp_clock_source,
2240 					grouped_pipes[i]->stream_res.tg->inst,
2241 					phase[i], modulo[i]);
2242 				dc->res_pool->dp_clock_source->funcs->get_pixel_clk_frequency_100hz(
2243 					dc->res_pool->dp_clock_source,
2244 					grouped_pipes[i]->stream_res.tg->inst, &pclk);
2245 				grouped_pipes[i]->stream->timing.pix_clk_100hz =
2246 					pclk*get_clock_divider(grouped_pipes[i], false);
2247 				if (master == -1)
2248 					master = i;
2249 			}
2250 		}
2251 
2252 	}
2253 
2254 	kfree(hw_crtc_timing);
2255 	return master;
2256 }
2257 
2258 void dcn10_enable_vblanks_synchronization(
2259 	struct dc *dc,
2260 	int group_index,
2261 	int group_size,
2262 	struct pipe_ctx *grouped_pipes[])
2263 {
2264 	struct dc_context *dc_ctx = dc->ctx;
2265 	struct output_pixel_processor *opp;
2266 	struct timing_generator *tg;
2267 	int i, width = 0, height = 0, master;
2268 
2269 	DC_LOGGER_INIT(dc_ctx->logger);
2270 
2271 	for (i = 1; i < group_size; i++) {
2272 		opp = grouped_pipes[i]->stream_res.opp;
2273 		tg = grouped_pipes[i]->stream_res.tg;
2274 		tg->funcs->get_otg_active_size(tg, &width, &height);
2275 
2276 		if (!tg->funcs->is_tg_enabled(tg)) {
2277 			DC_SYNC_INFO("Skipping timing sync on disabled OTG\n");
2278 			return;
2279 		}
2280 
2281 		if (opp->funcs->opp_program_dpg_dimensions)
2282 			opp->funcs->opp_program_dpg_dimensions(opp, width, 2*(height) + 1);
2283 	}
2284 
2285 	for (i = 0; i < group_size; i++) {
2286 		if (grouped_pipes[i]->stream == NULL)
2287 			continue;
2288 		grouped_pipes[i]->stream->vblank_synchronized = false;
2289 		grouped_pipes[i]->stream->has_non_synchronizable_pclk = false;
2290 	}
2291 
2292 	DC_SYNC_INFO("Aligning DP DTOs\n");
2293 
2294 	master = dcn10_align_pixel_clocks(dc, group_size, grouped_pipes);
2295 
2296 	DC_SYNC_INFO("Synchronizing VBlanks\n");
2297 
2298 	if (master >= 0) {
2299 		for (i = 0; i < group_size; i++) {
2300 			if (i != master && !grouped_pipes[i]->stream->has_non_synchronizable_pclk)
2301 				grouped_pipes[i]->stream_res.tg->funcs->align_vblanks(
2302 					grouped_pipes[master]->stream_res.tg,
2303 					grouped_pipes[i]->stream_res.tg,
2304 					grouped_pipes[master]->stream->timing.pix_clk_100hz,
2305 					grouped_pipes[i]->stream->timing.pix_clk_100hz,
2306 					get_clock_divider(grouped_pipes[master], false),
2307 					get_clock_divider(grouped_pipes[i], false));
2308 			grouped_pipes[i]->stream->vblank_synchronized = true;
2309 		}
2310 		grouped_pipes[master]->stream->vblank_synchronized = true;
2311 		DC_SYNC_INFO("Sync complete\n");
2312 	}
2313 
2314 	for (i = 1; i < group_size; i++) {
2315 		opp = grouped_pipes[i]->stream_res.opp;
2316 		tg = grouped_pipes[i]->stream_res.tg;
2317 		tg->funcs->get_otg_active_size(tg, &width, &height);
2318 		if (opp->funcs->opp_program_dpg_dimensions)
2319 			opp->funcs->opp_program_dpg_dimensions(opp, width, height);
2320 	}
2321 }
2322 
2323 void dcn10_enable_timing_synchronization(
2324 	struct dc *dc,
2325 	struct dc_state *state,
2326 	int group_index,
2327 	int group_size,
2328 	struct pipe_ctx *grouped_pipes[])
2329 {
2330 	struct dc_context *dc_ctx = dc->ctx;
2331 	struct output_pixel_processor *opp;
2332 	struct timing_generator *tg;
2333 	int i, width = 0, height = 0;
2334 
2335 	DC_LOGGER_INIT(dc_ctx->logger);
2336 
2337 	DC_SYNC_INFO("Setting up OTG reset trigger\n");
2338 
2339 	for (i = 1; i < group_size; i++) {
2340 		if (grouped_pipes[i]->stream && dc_state_get_pipe_subvp_type(state, grouped_pipes[i]) == SUBVP_PHANTOM)
2341 			continue;
2342 
2343 		opp = grouped_pipes[i]->stream_res.opp;
2344 		tg = grouped_pipes[i]->stream_res.tg;
2345 		tg->funcs->get_otg_active_size(tg, &width, &height);
2346 
2347 		if (!tg->funcs->is_tg_enabled(tg)) {
2348 			DC_SYNC_INFO("Skipping timing sync on disabled OTG\n");
2349 			return;
2350 		}
2351 
2352 		if (opp->funcs->opp_program_dpg_dimensions)
2353 			opp->funcs->opp_program_dpg_dimensions(opp, width, 2*(height) + 1);
2354 	}
2355 
2356 	for (i = 0; i < group_size; i++) {
2357 		if (grouped_pipes[i]->stream == NULL)
2358 			continue;
2359 
2360 		if (grouped_pipes[i]->stream && dc_state_get_pipe_subvp_type(state, grouped_pipes[i]) == SUBVP_PHANTOM)
2361 			continue;
2362 
2363 		grouped_pipes[i]->stream->vblank_synchronized = false;
2364 	}
2365 
2366 	for (i = 1; i < group_size; i++) {
2367 		if (grouped_pipes[i]->stream && dc_state_get_pipe_subvp_type(state, grouped_pipes[i]) == SUBVP_PHANTOM)
2368 			continue;
2369 
2370 		grouped_pipes[i]->stream_res.tg->funcs->enable_reset_trigger(
2371 				grouped_pipes[i]->stream_res.tg,
2372 				grouped_pipes[0]->stream_res.tg->inst);
2373 	}
2374 
2375 	DC_SYNC_INFO("Waiting for trigger\n");
2376 
2377 	/* We only need to check 1 pipe for having reset, as all the others are
2378 	 * synchronized. Look at the last pipe programmed to reset.
2379 	 */
2380 
2381 	if (grouped_pipes[1]->stream && dc_state_get_pipe_subvp_type(state, grouped_pipes[1]) != SUBVP_PHANTOM)
2382 		wait_for_reset_trigger_to_occur(dc_ctx, grouped_pipes[1]->stream_res.tg);
2383 
2384 	for (i = 1; i < group_size; i++) {
2385 		if (grouped_pipes[i]->stream && dc_state_get_pipe_subvp_type(state, grouped_pipes[i]) == SUBVP_PHANTOM)
2386 			continue;
2387 
2388 		grouped_pipes[i]->stream_res.tg->funcs->disable_reset_trigger(
2389 				grouped_pipes[i]->stream_res.tg);
2390 	}
2391 
2392 	for (i = 1; i < group_size; i++) {
2393 		if (dc_state_get_pipe_subvp_type(state, grouped_pipes[i]) == SUBVP_PHANTOM)
2394 			continue;
2395 
2396 		opp = grouped_pipes[i]->stream_res.opp;
2397 		tg = grouped_pipes[i]->stream_res.tg;
2398 		tg->funcs->get_otg_active_size(tg, &width, &height);
2399 		if (opp->funcs->opp_program_dpg_dimensions)
2400 			opp->funcs->opp_program_dpg_dimensions(opp, width, height);
2401 	}
2402 
2403 	DC_SYNC_INFO("Sync complete\n");
2404 }
2405 
2406 void dcn10_enable_per_frame_crtc_position_reset(
2407 	struct dc *dc,
2408 	int group_size,
2409 	struct pipe_ctx *grouped_pipes[])
2410 {
2411 	struct dc_context *dc_ctx = dc->ctx;
2412 	int i;
2413 
2414 	DC_LOGGER_INIT(dc_ctx->logger);
2415 
2416 	DC_SYNC_INFO("Setting up\n");
2417 	for (i = 0; i < group_size; i++)
2418 		if (grouped_pipes[i]->stream_res.tg->funcs->enable_crtc_reset)
2419 			grouped_pipes[i]->stream_res.tg->funcs->enable_crtc_reset(
2420 					grouped_pipes[i]->stream_res.tg,
2421 					0,
2422 					&grouped_pipes[i]->stream->triggered_crtc_reset);
2423 
2424 	DC_SYNC_INFO("Waiting for trigger\n");
2425 
2426 	for (i = 0; i < group_size; i++)
2427 		wait_for_reset_trigger_to_occur(dc_ctx, grouped_pipes[i]->stream_res.tg);
2428 
2429 	DC_SYNC_INFO("Multi-display sync is complete\n");
2430 }
2431 
2432 static void mmhub_read_vm_system_aperture_settings(struct dcn10_hubp *hubp1,
2433 		struct vm_system_aperture_param *apt,
2434 		struct dce_hwseq *hws)
2435 {
2436 	PHYSICAL_ADDRESS_LOC physical_page_number;
2437 	uint32_t logical_addr_low;
2438 	uint32_t logical_addr_high;
2439 
2440 	REG_GET(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
2441 			PHYSICAL_PAGE_NUMBER_MSB, &physical_page_number.high_part);
2442 	REG_GET(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
2443 			PHYSICAL_PAGE_NUMBER_LSB, &physical_page_number.low_part);
2444 
2445 	REG_GET(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
2446 			LOGICAL_ADDR, &logical_addr_low);
2447 
2448 	REG_GET(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
2449 			LOGICAL_ADDR, &logical_addr_high);
2450 
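	/* The shift amounts suggest the aperture registers hold the upper address
	 * bits: the default physical page number is a 4 KiB page index (<< 12),
	 * and the low/high logical addresses are in 256 KiB units (<< 18).
	 */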
2451 	apt->sys_default.quad_part =  physical_page_number.quad_part << 12;
2452 	apt->sys_low.quad_part =  (int64_t)logical_addr_low << 18;
2453 	apt->sys_high.quad_part =  (int64_t)logical_addr_high << 18;
2454 }
2455 
2456 /* Temporarily read the settings; in the future the values will come from KMD directly */
2457 static void mmhub_read_vm_context0_settings(struct dcn10_hubp *hubp1,
2458 		struct vm_context0_param *vm0,
2459 		struct dce_hwseq *hws)
2460 {
2461 	PHYSICAL_ADDRESS_LOC fb_base;
2462 	PHYSICAL_ADDRESS_LOC fb_offset;
2463 	uint32_t fb_base_value;
2464 	uint32_t fb_offset_value;
2465 
2466 	REG_GET(DCHUBBUB_SDPIF_FB_BASE, SDPIF_FB_BASE, &fb_base_value);
2467 	REG_GET(DCHUBBUB_SDPIF_FB_OFFSET, SDPIF_FB_OFFSET, &fb_offset_value);
2468 
2469 	REG_GET(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
2470 			PAGE_DIRECTORY_ENTRY_HI32, &vm0->pte_base.high_part);
2471 	REG_GET(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
2472 			PAGE_DIRECTORY_ENTRY_LO32, &vm0->pte_base.low_part);
2473 
2474 	REG_GET(VM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
2475 			LOGICAL_PAGE_NUMBER_HI4, &vm0->pte_start.high_part);
2476 	REG_GET(VM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
2477 			LOGICAL_PAGE_NUMBER_LO32, &vm0->pte_start.low_part);
2478 
2479 	REG_GET(VM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
2480 			LOGICAL_PAGE_NUMBER_HI4, &vm0->pte_end.high_part);
2481 	REG_GET(VM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
2482 			LOGICAL_PAGE_NUMBER_LO32, &vm0->pte_end.low_part);
2483 
2484 	REG_GET(VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32,
2485 			PHYSICAL_PAGE_ADDR_HI4, &vm0->fault_default.high_part);
2486 	REG_GET(VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32,
2487 			PHYSICAL_PAGE_ADDR_LO32, &vm0->fault_default.low_part);
2488 
2489 	/*
2490 	 * The values in VM_CONTEXT0_PAGE_TABLE_BASE_ADDR are in UMA space.
2491 	 * Therefore we need to do
2492 	 * DCN_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR = VM_CONTEXT0_PAGE_TABLE_BASE_ADDR
2493 	 * - DCHUBBUB_SDPIF_FB_OFFSET + DCHUBBUB_SDPIF_FB_BASE
2494 	 */
2495 	fb_base.quad_part = (uint64_t)fb_base_value << 24;
2496 	fb_offset.quad_part = (uint64_t)fb_offset_value << 24;
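	/* The << 24 implies SDPIF_FB_BASE/OFFSET are programmed in 16 MiB units;
	 * they are converted to byte addresses here before adjusting the PTE
	 * base (assumption based on the shift amount).
	 */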
2497 	vm0->pte_base.quad_part += fb_base.quad_part;
2498 	vm0->pte_base.quad_part -= fb_offset.quad_part;
2499 }
2500 
2501 
2502 static void dcn10_program_pte_vm(struct dce_hwseq *hws, struct hubp *hubp)
2503 {
2504 	struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
2505 	struct vm_system_aperture_param apt = {0};
2506 	struct vm_context0_param vm0 = {0};
2507 
2508 	mmhub_read_vm_system_aperture_settings(hubp1, &apt, hws);
2509 	mmhub_read_vm_context0_settings(hubp1, &vm0, hws);
2510 
2511 	hubp->funcs->hubp_set_vm_system_aperture_settings(hubp, &apt);
2512 	hubp->funcs->hubp_set_vm_context0_settings(hubp, &vm0);
2513 }
2514 
2515 static void dcn10_enable_plane(
2516 	struct dc *dc,
2517 	struct pipe_ctx *pipe_ctx,
2518 	struct dc_state *context)
2519 {
2520 	struct dce_hwseq *hws = dc->hwseq;
2521 
2522 	if (dc->debug.sanity_checks) {
2523 		hws->funcs.verify_allow_pstate_change_high(dc);
2524 	}
2525 
2526 	undo_DEGVIDCN10_253_wa(dc);
2527 
2528 	power_on_plane_resources(dc->hwseq,
2529 		pipe_ctx->plane_res.hubp->inst);
2530 
2531 	/* enable DCFCLK for the current DCHUB */
2532 	pipe_ctx->plane_res.hubp->funcs->hubp_clk_cntl(pipe_ctx->plane_res.hubp, true);
2533 
2534 	/* make sure OPP_PIPE_CLOCK_EN = 1 */
2535 	pipe_ctx->stream_res.opp->funcs->opp_pipe_clock_control(
2536 			pipe_ctx->stream_res.opp,
2537 			true);
2538 
2539 	if (dc->config.gpu_vm_support)
2540 		dcn10_program_pte_vm(hws, pipe_ctx->plane_res.hubp);
2541 
2542 	if (dc->debug.sanity_checks) {
2543 		hws->funcs.verify_allow_pstate_change_high(dc);
2544 	}
2545 
2546 	if (!pipe_ctx->top_pipe
2547 		&& pipe_ctx->plane_state
2548 		&& pipe_ctx->plane_state->flip_int_enabled
2549 		&& pipe_ctx->plane_res.hubp->funcs->hubp_set_flip_int)
2550 			pipe_ctx->plane_res.hubp->funcs->hubp_set_flip_int(pipe_ctx->plane_res.hubp);
2551 
2552 }
2553 
2554 void dcn10_program_gamut_remap(struct pipe_ctx *pipe_ctx)
2555 {
2556 	int i = 0;
2557 	struct dpp_grph_csc_adjustment adjust;
2558 	memset(&adjust, 0, sizeof(adjust));
2559 	adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS;
2560 
2561 
2562 	if (pipe_ctx->stream->gamut_remap_matrix.enable_remap == true) {
2563 		adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW;
2564 		for (i = 0; i < CSC_TEMPERATURE_MATRIX_SIZE; i++)
2565 			adjust.temperature_matrix[i] =
2566 				pipe_ctx->stream->gamut_remap_matrix.matrix[i];
2567 	} else if (pipe_ctx->plane_state &&
2568 		   pipe_ctx->plane_state->gamut_remap_matrix.enable_remap == true) {
2569 		adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW;
2570 		for (i = 0; i < CSC_TEMPERATURE_MATRIX_SIZE; i++)
2571 			adjust.temperature_matrix[i] =
2572 				pipe_ctx->plane_state->gamut_remap_matrix.matrix[i];
2573 	}
2574 
2575 	pipe_ctx->plane_res.dpp->funcs->dpp_set_gamut_remap(pipe_ctx->plane_res.dpp, &adjust);
2576 }
2577 
2578 
2579 static bool dcn10_is_rear_mpo_fix_required(struct pipe_ctx *pipe_ctx, enum dc_color_space colorspace)
2580 {
2581 	if (pipe_ctx->plane_state && pipe_ctx->plane_state->layer_index > 0 && is_rgb_cspace(colorspace)) {
2582 		if (pipe_ctx->top_pipe) {
2583 			struct pipe_ctx *top = pipe_ctx->top_pipe;
2584 
2585 			while (top->top_pipe)
2586 				top = top->top_pipe; // Traverse to top pipe_ctx
2587 			if (top->plane_state && top->plane_state->layer_index == 0 && !top->plane_state->global_alpha)
2588 				// Global alpha used by top plane for PIP overlay
2589 				// Pre-multiplied/per-pixel alpha used by MPO
2590 				// Check the top plane's global alpha to ensure layer_index > 0 is not caused by PIP
2591 				return true; // MPO in use and front plane not hidden
2592 		}
2593 	}
2594 	return false;
2595 }
2596 
2597 static void dcn10_set_csc_adjustment_rgb_mpo_fix(struct pipe_ctx *pipe_ctx, uint16_t *matrix)
2598 {
2599 	// Override rear plane RGB bias to fix MPO brightness
2600 	uint16_t rgb_bias = matrix[3];
2601 
2602 	matrix[3] = 0;
2603 	matrix[7] = 0;
2604 	matrix[11] = 0;
2605 	pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_adjustment(pipe_ctx->plane_res.dpp, matrix);
2606 	matrix[3] = rgb_bias;
2607 	matrix[7] = rgb_bias;
2608 	matrix[11] = rgb_bias;
2609 }
2610 
2611 void dcn10_program_output_csc(struct dc *dc,
2612 		struct pipe_ctx *pipe_ctx,
2613 		enum dc_color_space colorspace,
2614 		uint16_t *matrix,
2615 		int opp_id)
2616 {
2617 	if (pipe_ctx->stream->csc_color_matrix.enable_adjustment == true) {
2618 		if (pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_adjustment != NULL) {
2619 
2620 			/* MPO is broken with RGB colorspaces when the OCSC matrix brightness
2621 			 * offset is >= 0 on DCN1, because OCSC runs before MPC: blending
2622 			 * adds the offsets from both front and rear to the rear plane.
2623 			 *
2624 			 * The fix is to set the RGB bias to 0 on the rear plane; the top
2625 			 * plane's black-value pixels then add the offset instead of rear + front.
2626 			 */
2627 
2628 			int16_t rgb_bias = matrix[3];
2629 			// matrix[3/7/11] are all the same offset value
2630 
2631 			if (rgb_bias > 0 && dcn10_is_rear_mpo_fix_required(pipe_ctx, colorspace)) {
2632 				dcn10_set_csc_adjustment_rgb_mpo_fix(pipe_ctx, matrix);
2633 			} else {
2634 				pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_adjustment(pipe_ctx->plane_res.dpp, matrix);
2635 			}
2636 		}
2637 	} else {
2638 		if (pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_default != NULL)
2639 			pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_default(pipe_ctx->plane_res.dpp, colorspace);
2640 	}
2641 }
2642 
2643 static void dcn10_update_dpp(struct dpp *dpp, struct dc_plane_state *plane_state)
2644 {
2645 	struct dc_bias_and_scale bns_params = {0};
2646 
2647 	// program the input csc
2648 	dpp->funcs->dpp_setup(dpp,
2649 			plane_state->format,
2650 			EXPANSION_MODE_ZERO,
2651 			plane_state->input_csc_color_matrix,
2652 			plane_state->color_space,
2653 			NULL);
2654 
2655 	// set scale and bias registers
2656 	build_prescale_params(&bns_params, plane_state);
2657 	if (dpp->funcs->dpp_program_bias_and_scale)
2658 		dpp->funcs->dpp_program_bias_and_scale(dpp, &bns_params);
2659 }
2660 
2661 void dcn10_update_visual_confirm_color(struct dc *dc,
2662 		struct pipe_ctx *pipe_ctx,
2663 		int mpcc_id)
2664 {
2665 	struct mpc *mpc = dc->res_pool->mpc;
2666 
2667 	if (mpc->funcs->set_bg_color) {
2668 		memcpy(&pipe_ctx->plane_state->visual_confirm_color, &(pipe_ctx->visual_confirm_color), sizeof(struct tg_color));
2669 		mpc->funcs->set_bg_color(mpc, &(pipe_ctx->visual_confirm_color), mpcc_id);
2670 	}
2671 }
2672 
2673 void dcn10_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
2674 {
2675 	struct hubp *hubp = pipe_ctx->plane_res.hubp;
2676 	struct mpcc_blnd_cfg blnd_cfg = {0};
2677 	bool per_pixel_alpha = pipe_ctx->plane_state->per_pixel_alpha && pipe_ctx->bottom_pipe;
2678 	int mpcc_id;
2679 	struct mpcc *new_mpcc;
2680 	struct mpc *mpc = dc->res_pool->mpc;
2681 	struct mpc_tree *mpc_tree_params = &(pipe_ctx->stream_res.opp->mpc_tree_params);
2682 
2683 	blnd_cfg.overlap_only = false;
2684 	blnd_cfg.global_gain = 0xff;
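	/* 0xff is presumably full (1.0) gain in the 8-bit blend parameters; it
	 * may be overridden below when per-pixel alpha is combined with a global
	 * alpha value.
	 */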
2685 
2686 	if (per_pixel_alpha) {
2687 		/* DCN1.0 has output CM before MPC which seems to screw with
2688 		 * pre-multiplied alpha.
2689 		 */
2690 		blnd_cfg.pre_multiplied_alpha = (is_rgb_cspace(
2691 				pipe_ctx->stream->output_color_space)
2692 						&& pipe_ctx->plane_state->pre_multiplied_alpha);
2693 		if (pipe_ctx->plane_state->global_alpha) {
2694 			blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA_COMBINED_GLOBAL_GAIN;
2695 			blnd_cfg.global_gain = pipe_ctx->plane_state->global_alpha_value;
2696 		} else {
2697 			blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA;
2698 		}
2699 	} else {
2700 		blnd_cfg.pre_multiplied_alpha = false;
2701 		blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA;
2702 	}
2703 
2704 	if (pipe_ctx->plane_state->global_alpha)
2705 		blnd_cfg.global_alpha = pipe_ctx->plane_state->global_alpha_value;
2706 	else
2707 		blnd_cfg.global_alpha = 0xff;
2708 
2709 	/*
2710 	 * TODO: remove hack
2711 	 * Note: currently there is a bug in init_hw such that
2712 	 * on resume from hibernate, BIOS sets up MPCC0, and
2713 	 * we do mpcc_remove but the mpcc cannot go to idle
2714 	 * after remove. This causes us to pick mpcc1 here,
2715 	 * which causes a pstate hang for a yet-unknown reason.
2716 	 */
2717 	mpcc_id = hubp->inst;
2718 
2719 	/* If there is no full update, we don't need to touch the MPC tree */
2720 	if (!pipe_ctx->plane_state->update_flags.bits.full_update) {
2721 		mpc->funcs->update_blending(mpc, &blnd_cfg, mpcc_id);
2722 		dc->hwss.update_visual_confirm_color(dc, pipe_ctx, mpcc_id);
2723 		return;
2724 	}
2725 
2726 	/* check if this MPCC is already being used */
2727 	new_mpcc = mpc->funcs->get_mpcc_for_dpp(mpc_tree_params, mpcc_id);
2728 	/* remove MPCC if being used */
2729 	if (new_mpcc != NULL)
2730 		mpc->funcs->remove_mpcc(mpc, mpc_tree_params, new_mpcc);
2731 	else
2732 		if (dc->debug.sanity_checks)
2733 			mpc->funcs->assert_mpcc_idle_before_connect(
2734 					dc->res_pool->mpc, mpcc_id);
2735 
2736 	/* Call MPC to insert new plane */
2737 	new_mpcc = mpc->funcs->insert_plane(dc->res_pool->mpc,
2738 			mpc_tree_params,
2739 			&blnd_cfg,
2740 			NULL,
2741 			NULL,
2742 			hubp->inst,
2743 			mpcc_id);
2744 	dc->hwss.update_visual_confirm_color(dc, pipe_ctx, mpcc_id);
2745 
2746 	ASSERT(new_mpcc != NULL);
2747 	hubp->opp_id = pipe_ctx->stream_res.opp->inst;
2748 	hubp->mpcc_id = mpcc_id;
2749 }
2750 
2751 static void update_scaler(struct pipe_ctx *pipe_ctx)
2752 {
2753 	bool per_pixel_alpha =
2754 			pipe_ctx->plane_state->per_pixel_alpha && pipe_ctx->bottom_pipe;
2755 
2756 	pipe_ctx->plane_res.scl_data.lb_params.alpha_en = per_pixel_alpha;
2757 	pipe_ctx->plane_res.scl_data.lb_params.depth = LB_PIXEL_DEPTH_36BPP;
2758 	/* scaler configuration */
2759 	pipe_ctx->plane_res.dpp->funcs->dpp_set_scaler(
2760 			pipe_ctx->plane_res.dpp, &pipe_ctx->plane_res.scl_data);
2761 }
2762 
2763 static void dcn10_update_dchubp_dpp(
2764 	struct dc *dc,
2765 	struct pipe_ctx *pipe_ctx,
2766 	struct dc_state *context)
2767 {
2768 	struct dce_hwseq *hws = dc->hwseq;
2769 	struct hubp *hubp = pipe_ctx->plane_res.hubp;
2770 	struct dpp *dpp = pipe_ctx->plane_res.dpp;
2771 	struct dc_plane_state *plane_state = pipe_ctx->plane_state;
2772 	struct plane_size size = plane_state->plane_size;
2773 	unsigned int compat_level = 0;
2774 	bool should_divided_by_2 = false;
2775 
2776 	/* Depending on the DML calculation, the DPP clock value may change dynamically. */
2777 	/* If the requested max dpp clk is lower than the current dispclk, there is
2778 	 * no need to divide by 2.
2779 	 */
2780 	if (plane_state->update_flags.bits.full_update) {
2781 
2782 		/* The newly calculated dispclk and dppclk are stored in
2783 		 * context->bw_ctx.bw.dcn.clk.dispclk_khz / dppclk_khz. The current
2784 		 * dispclk and dppclk come from dc->clk_mgr->clks.dispclk_khz.
2785 		 * dcn10_validate_bandwidth computes the new dispclk and dppclk.
2786 		 * dispclk is put in use after optimize_bandwidth, when
2787 		 * ramp_up_dispclk_with_dpp is called.
2788 		 * There are two places where dppclk is put in use. One is the
2789 		 * same location as dispclk. The other is within
2790 		 * update_dchubp_dpp, which happens between pre_bandwidth and
2791 		 * optimize_bandwidth.
2792 		 * A dppclk update within update_dchubp_dpp means the new dispclk
2793 		 * and dppclk values are not in use at the same time. When clocks
2794 		 * are decreased, this may leave dppclk lower than the previous
2795 		 * configuration and leave the pipe stuck.
2796 		 * For example, with eDP + external DP, change the DP resolution
2797 		 * from 1920x1080x144hz to 1280x960x60hz.
2798 		 * Before the change: dispclk = 337889, dppclk = 337889.
2799 		 * After the mode change, dcn10_validate_bandwidth calculates
2800 		 *                dispclk = 143122, dppclk = 143122.
2801 		 * update_dchubp_dpp is executed before dispclk is updated, so
2802 		 * dispclk = 337889, but dppclk uses the new value dispclk / 2 =
2803 		 * 168944. This causes a pipe pstate warning issue.
2804 		 * Solution: between pre_bandwidth and optimize_bandwidth, while
2805 		 * dispclk is going to be decreased, keep dppclk = dispclk.
2806 		 **/
2807 		if (context->bw_ctx.bw.dcn.clk.dispclk_khz <
2808 				dc->clk_mgr->clks.dispclk_khz)
2809 			should_divided_by_2 = false;
2810 		else
2811 			should_divided_by_2 =
2812 					context->bw_ctx.bw.dcn.clk.dppclk_khz <=
2813 					dc->clk_mgr->clks.dispclk_khz / 2;
2814 
2815 		dpp->funcs->dpp_dppclk_control(
2816 				dpp,
2817 				should_divided_by_2,
2818 				true);
2819 
2820 		if (dc->res_pool->dccg)
2821 			dc->res_pool->dccg->funcs->update_dpp_dto(
2822 					dc->res_pool->dccg,
2823 					dpp->inst,
2824 					pipe_ctx->plane_res.bw.dppclk_khz);
2825 		else
2826 			dc->clk_mgr->clks.dppclk_khz = should_divided_by_2 ?
2827 						dc->clk_mgr->clks.dispclk_khz / 2 :
2828 							dc->clk_mgr->clks.dispclk_khz;
2829 	}
2830 
2831 	/* TODO: Need an input parameter to tell which OTG the current DCHUB pipe is tied to.
2832 	 * VTG is within DCHUBBUB, which is a common block shared by each pipe's HUBP.
2833 	 * VTG has a 1:1 mapping with OTG. Each pipe's HUBP selects which VTG to use.
2834 	 */
2835 	if (plane_state->update_flags.bits.full_update) {
2836 		hubp->funcs->hubp_vtg_sel(hubp, pipe_ctx->stream_res.tg->inst);
2837 
2838 		hubp->funcs->hubp_setup(
2839 			hubp,
2840 			&pipe_ctx->dlg_regs,
2841 			&pipe_ctx->ttu_regs,
2842 			&pipe_ctx->rq_regs,
2843 			&pipe_ctx->pipe_dlg_param);
2844 		hubp->funcs->hubp_setup_interdependent(
2845 			hubp,
2846 			&pipe_ctx->dlg_regs,
2847 			&pipe_ctx->ttu_regs);
2848 	}
2849 
2850 	size.surface_size = pipe_ctx->plane_res.scl_data.viewport;
2851 
2852 	if (plane_state->update_flags.bits.full_update ||
2853 		plane_state->update_flags.bits.bpp_change)
2854 		dcn10_update_dpp(dpp, plane_state);
2855 
2856 	if (plane_state->update_flags.bits.full_update ||
2857 		plane_state->update_flags.bits.per_pixel_alpha_change ||
2858 		plane_state->update_flags.bits.global_alpha_change)
2859 		hws->funcs.update_mpcc(dc, pipe_ctx);
2860 
2861 	if (plane_state->update_flags.bits.full_update ||
2862 		plane_state->update_flags.bits.per_pixel_alpha_change ||
2863 		plane_state->update_flags.bits.global_alpha_change ||
2864 		plane_state->update_flags.bits.scaling_change ||
2865 		plane_state->update_flags.bits.position_change) {
2866 		update_scaler(pipe_ctx);
2867 	}
2868 
2869 	if (plane_state->update_flags.bits.full_update ||
2870 		plane_state->update_flags.bits.scaling_change ||
2871 		plane_state->update_flags.bits.position_change) {
2872 		hubp->funcs->mem_program_viewport(
2873 			hubp,
2874 			&pipe_ctx->plane_res.scl_data.viewport,
2875 			&pipe_ctx->plane_res.scl_data.viewport_c);
2876 	}
2877 
2878 	if (pipe_ctx->stream->cursor_attributes.address.quad_part != 0) {
2879 		dc->hwss.set_cursor_attribute(pipe_ctx);
2880 		dc->hwss.set_cursor_position(pipe_ctx);
2881 
2882 		if (dc->hwss.set_cursor_sdr_white_level)
2883 			dc->hwss.set_cursor_sdr_white_level(pipe_ctx);
2884 	}
2885 
2886 	if (plane_state->update_flags.bits.full_update) {
2887 		/* gamut remap */
2888 		dc->hwss.program_gamut_remap(pipe_ctx);
2889 
2890 		dc->hwss.program_output_csc(dc,
2891 				pipe_ctx,
2892 				pipe_ctx->stream->output_color_space,
2893 				pipe_ctx->stream->csc_color_matrix.matrix,
2894 				pipe_ctx->stream_res.opp->inst);
2895 	}
2896 
2897 	if (plane_state->update_flags.bits.full_update ||
2898 		plane_state->update_flags.bits.pixel_format_change ||
2899 		plane_state->update_flags.bits.horizontal_mirror_change ||
2900 		plane_state->update_flags.bits.rotation_change ||
2901 		plane_state->update_flags.bits.swizzle_change ||
2902 		plane_state->update_flags.bits.dcc_change ||
2903 		plane_state->update_flags.bits.bpp_change ||
2904 		plane_state->update_flags.bits.scaling_change ||
2905 		plane_state->update_flags.bits.plane_size_change) {
2906 		hubp->funcs->hubp_program_surface_config(
2907 			hubp,
2908 			plane_state->format,
2909 			&plane_state->tiling_info,
2910 			&size,
2911 			plane_state->rotation,
2912 			&plane_state->dcc,
2913 			plane_state->horizontal_mirror,
2914 			compat_level);
2915 	}
2916 
2917 	hubp->power_gated = false;
2918 
2919 	dc->hwss.update_plane_addr(dc, pipe_ctx);
2920 
2921 	if (is_pipe_tree_visible(pipe_ctx))
2922 		hubp->funcs->set_blank(hubp, false);
2923 }
2924 
2925 void dcn10_blank_pixel_data(
2926 		struct dc *dc,
2927 		struct pipe_ctx *pipe_ctx,
2928 		bool blank)
2929 {
2930 	enum dc_color_space color_space;
2931 	struct tg_color black_color = {0};
2932 	struct stream_resource *stream_res = &pipe_ctx->stream_res;
2933 	struct dc_stream_state *stream = pipe_ctx->stream;
2934 
2935 	/* program otg blank color */
2936 	color_space = stream->output_color_space;
2937 	color_space_to_black_color(dc, color_space, &black_color);
2938 
2939 	/*
2940 	 * The way 420 is packed, 2 channels carry Y component, 1 channel
2941 	 * alternate between Cb and Cr, so both channels need the pixel
2942 	 * value for Y
2943 	 */
2944 	if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
2945 		black_color.color_r_cr = black_color.color_g_y;
2946 
2947 
2948 	if (stream_res->tg->funcs->set_blank_color)
2949 		stream_res->tg->funcs->set_blank_color(
2950 				stream_res->tg,
2951 				&black_color);
2952 
2953 	if (!blank) {
2954 		if (stream_res->tg->funcs->set_blank)
2955 			stream_res->tg->funcs->set_blank(stream_res->tg, blank);
2956 		if (stream_res->abm) {
2957 			dc->hwss.set_pipe(pipe_ctx);
2958 			stream_res->abm->funcs->set_abm_level(stream_res->abm, stream->abm_level);
2959 		}
2960 	} else {
2961 		dc->hwss.set_abm_immediate_disable(pipe_ctx);
2962 		if (stream_res->tg->funcs->set_blank) {
2963 			stream_res->tg->funcs->wait_for_state(stream_res->tg, CRTC_STATE_VBLANK);
2964 			stream_res->tg->funcs->set_blank(stream_res->tg, blank);
2965 		}
2966 	}
2967 }
2968 
2969 void dcn10_set_hdr_multiplier(struct pipe_ctx *pipe_ctx)
2970 {
2971 	struct fixed31_32 multiplier = pipe_ctx->plane_state->hdr_mult;
2972 	uint32_t hw_mult = 0x1f000; // 1.0 default multiplier
2973 	struct custom_float_format fmt;
2974 
2975 	fmt.exponenta_bits = 6;
2976 	fmt.mantissa_bits = 12;
2977 	fmt.sign = true;
2978 
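	/*
	 * With a 1-bit sign, 6-bit exponent (bias 31) and 12-bit mantissa,
	 * 1.0 encodes as 0b0_011111_000000000000 = 0x1f000, which is the
	 * hw_mult default above.
	 */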
2979 
2980 	if (!dc_fixpt_eq(multiplier, dc_fixpt_from_int(0))) // check != 0
2981 		convert_to_custom_float_format(multiplier, &fmt, &hw_mult);
2982 
2983 	pipe_ctx->plane_res.dpp->funcs->dpp_set_hdr_multiplier(
2984 			pipe_ctx->plane_res.dpp, hw_mult);
2985 }
2986 
2987 void dcn10_program_pipe(
2988 		struct dc *dc,
2989 		struct pipe_ctx *pipe_ctx,
2990 		struct dc_state *context)
2991 {
2992 	struct dce_hwseq *hws = dc->hwseq;
2993 
2994 	if (pipe_ctx->top_pipe == NULL) {
2995 		bool blank = !is_pipe_tree_visible(pipe_ctx);
2996 
2997 		pipe_ctx->stream_res.tg->funcs->program_global_sync(
2998 				pipe_ctx->stream_res.tg,
2999 				calculate_vready_offset_for_group(pipe_ctx),
3000 				pipe_ctx->pipe_dlg_param.vstartup_start,
3001 				pipe_ctx->pipe_dlg_param.vupdate_offset,
3002 				pipe_ctx->pipe_dlg_param.vupdate_width,
3003 				pipe_ctx->pipe_dlg_param.pstate_keepout);
3004 
3005 		pipe_ctx->stream_res.tg->funcs->set_vtg_params(
3006 				pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing, true);
3007 
3008 		if (hws->funcs.setup_vupdate_interrupt)
3009 			hws->funcs.setup_vupdate_interrupt(dc, pipe_ctx);
3010 
3011 		hws->funcs.blank_pixel_data(dc, pipe_ctx, blank);
3012 	}
3013 
3014 	if (pipe_ctx->plane_state->update_flags.bits.full_update)
3015 		dcn10_enable_plane(dc, pipe_ctx, context);
3016 
3017 	dcn10_update_dchubp_dpp(dc, pipe_ctx, context);
3018 
3019 	hws->funcs.set_hdr_multiplier(pipe_ctx);
3020 
3021 	if (pipe_ctx->plane_state->update_flags.bits.full_update ||
3022 			pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change ||
3023 			pipe_ctx->plane_state->update_flags.bits.gamma_change)
3024 		hws->funcs.set_input_transfer_func(dc, pipe_ctx, pipe_ctx->plane_state);
3025 
3026 	/* dcn10_translate_regamma_to_hw_format takes 750us to finish,
3027 	 * so only do gamma programming for a full update.
3028 	 * TODO: This can be further optimized/cleaned up.
3029 	 * Always call this for now since it does a memcmp internally before
3030 	 * doing the heavy calculation and programming.
3031 	 */
3032 	if (pipe_ctx->plane_state->update_flags.bits.full_update)
3033 		hws->funcs.set_output_transfer_func(dc, pipe_ctx, pipe_ctx->stream);
3034 }
3035 
3036 void dcn10_wait_for_pending_cleared(struct dc *dc,
3037 		struct dc_state *context)
3038 {
3039 		struct pipe_ctx *pipe_ctx;
3040 		struct timing_generator *tg;
3041 		int i;
3042 
3043 		for (i = 0; i < dc->res_pool->pipe_count; i++) {
3044 			pipe_ctx = &context->res_ctx.pipe_ctx[i];
3045 			tg = pipe_ctx->stream_res.tg;
3046 
3047 			/*
3048 			 * Only wait for the top pipe's tg pending bit.
3049 			 * Also skip if the pipe is disabled.
3050 			 */
3051 			if (pipe_ctx->top_pipe ||
3052 			    !pipe_ctx->stream || !pipe_ctx->plane_state ||
3053 			    !tg->funcs->is_tg_enabled(tg))
3054 				continue;
3055 
3056 			/*
3057 			 * Wait for VBLANK then VACTIVE to ensure we get VUPDATE.
3058 			 * For some reason waiting for OTG_UPDATE_PENDING cleared
3059 			 * seems to not trigger the update right away, and if we
3060 			 * lock again before VUPDATE then we don't get a separate
3061 			 * operation.
3062 			 */
3063 			pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VBLANK);
3064 			pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE);
3065 		}
3066 }
3067 
3068 void dcn10_post_unlock_program_front_end(
3069 		struct dc *dc,
3070 		struct dc_state *context)
3071 {
3072 	int i;
3073 
3074 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
3075 		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
3076 
3077 		if (!pipe_ctx->top_pipe &&
3078 			!pipe_ctx->prev_odm_pipe &&
3079 			pipe_ctx->stream) {
3080 			struct timing_generator *tg = pipe_ctx->stream_res.tg;
3081 
3082 			if (context->stream_status[i].plane_count == 0)
3083 				false_optc_underflow_wa(dc, pipe_ctx->stream, tg);
3084 		}
3085 	}
3086 
3087 	for (i = 0; i < dc->res_pool->pipe_count; i++)
3088 		if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable)
3089 			dc->hwss.disable_plane(dc, dc->current_state, &dc->current_state->res_ctx.pipe_ctx[i]);
3090 
3091 	for (i = 0; i < dc->res_pool->pipe_count; i++)
3092 		if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable) {
3093 			dc->hwss.optimize_bandwidth(dc, context);
3094 			break;
3095 		}
3096 
3097 	if (dc->hwseq->wa.DEGVIDCN10_254)
3098 		hubbub1_wm_change_req_wa(dc->res_pool->hubbub);
3099 }
3100 
3101 static void dcn10_stereo_hw_frame_pack_wa(struct dc *dc, struct dc_state *context)
3102 {
3103 	uint8_t i;
3104 
3105 	for (i = 0; i < context->stream_count; i++) {
3106 		if (context->streams[i]->timing.timing_3d_format
3107 				== TIMING_3D_FORMAT_HW_FRAME_PACKING) {
3108 			/*
3109 			 * Disable stutter
3110 			 */
3111 			hubbub1_allow_self_refresh_control(dc->res_pool->hubbub, false);
3112 			break;
3113 		}
3114 	}
3115 }
3116 
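/*
 * The final bool passed to update_clocks() (safe_to_lower in the clk_mgr
 * interface) is false here, so this step only raises clocks ahead of
 * front-end programming; dcn10_optimize_bandwidth() below passes true once
 * the new state is committed, allowing clocks to be lowered again.
 */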
3117 void dcn10_prepare_bandwidth(
3118 		struct dc *dc,
3119 		struct dc_state *context)
3120 {
3121 	struct dce_hwseq *hws = dc->hwseq;
3122 	struct hubbub *hubbub = dc->res_pool->hubbub;
3123 	int min_fclk_khz, min_dcfclk_khz, socclk_khz;
3124 
3125 	if (dc->debug.sanity_checks)
3126 		hws->funcs.verify_allow_pstate_change_high(dc);
3127 
3128 	if (context->stream_count == 0)
3129 		context->bw_ctx.bw.dcn.clk.phyclk_khz = 0;
3130 
3131 	dc->clk_mgr->funcs->update_clocks(
3132 			dc->clk_mgr,
3133 			context,
3134 			false);
3135 
3136 	dc->wm_optimized_required = hubbub->funcs->program_watermarks(hubbub,
3137 			&context->bw_ctx.bw.dcn.watermarks,
3138 			dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
3139 			true);
3140 	dcn10_stereo_hw_frame_pack_wa(dc, context);
3141 
3142 	if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE) {
3143 		DC_FP_START();
3144 		dcn_get_soc_clks(
3145 			dc, &min_fclk_khz, &min_dcfclk_khz, &socclk_khz);
3146 		DC_FP_END();
3147 		dcn_bw_notify_pplib_of_wm_ranges(
3148 			dc, min_fclk_khz, min_dcfclk_khz, socclk_khz);
3149 	}
3150 
3151 	if (dc->debug.sanity_checks)
3152 		hws->funcs.verify_allow_pstate_change_high(dc);
3153 }
3154 
3155 void dcn10_optimize_bandwidth(
3156 		struct dc *dc,
3157 		struct dc_state *context)
3158 {
3159 	struct dce_hwseq *hws = dc->hwseq;
3160 	struct hubbub *hubbub = dc->res_pool->hubbub;
3161 	int min_fclk_khz, min_dcfclk_khz, socclk_khz;
3162 
3163 	if (dc->debug.sanity_checks)
3164 		hws->funcs.verify_allow_pstate_change_high(dc);
3165 
3166 	if (context->stream_count == 0)
3167 		context->bw_ctx.bw.dcn.clk.phyclk_khz = 0;
3168 
3169 	dc->clk_mgr->funcs->update_clocks(
3170 			dc->clk_mgr,
3171 			context,
3172 			true);
3173 
3174 	hubbub->funcs->program_watermarks(hubbub,
3175 			&context->bw_ctx.bw.dcn.watermarks,
3176 			dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
3177 			true);
3178 
3179 	dcn10_stereo_hw_frame_pack_wa(dc, context);
3180 
3181 	if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE) {
3182 		DC_FP_START();
3183 		dcn_get_soc_clks(
3184 			dc, &min_fclk_khz, &min_dcfclk_khz, &socclk_khz);
3185 		DC_FP_END();
3186 		dcn_bw_notify_pplib_of_wm_ranges(
3187 			dc, min_fclk_khz, min_dcfclk_khz, socclk_khz);
3188 	}
3189 
3190 	if (dc->debug.sanity_checks)
3191 		hws->funcs.verify_allow_pstate_change_high(dc);
3192 }
3193 
3194 void dcn10_set_drr(struct pipe_ctx **pipe_ctx,
3195 		int num_pipes, struct dc_crtc_timing_adjust adjust)
3196 {
3197 	int i = 0;
3198 	struct drr_params params = {0};
3199 	// DRR set trigger event mapped to OTG_TRIG_A (bit 11) for manual control flow
3200 	unsigned int event_triggers = 0x800;
3201 	// Note: DRR trigger events are generated regardless of whether the num_frames threshold is met.
3202 	unsigned int num_frames = 2;
3203 
3204 	params.vertical_total_max = adjust.v_total_max;
3205 	params.vertical_total_min = adjust.v_total_min;
3206 	params.vertical_total_mid = adjust.v_total_mid;
3207 	params.vertical_total_mid_frame_num = adjust.v_total_mid_frame_num;
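	/*
	 * Example (hypothetical numbers): a 144 Hz panel with a 48-144 Hz
	 * variable-refresh window would pass v_total_min equal to the nominal
	 * v_total and v_total_max roughly three times that, since refresh rate
	 * scales inversely with v_total at a fixed pixel clock.
	 */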
3208 	/* TODO: If multiple pipes are to be supported, you need
3209 	 * some GSL stuff. Static screen triggers may be programmed differently
3210 	 * as well.
3211 	 */
3212 	for (i = 0; i < num_pipes; i++) {
3213 		/* dc_state_destruct() might null the stream resources, so fetch tg
3214 		 * here first to avoid a race condition. The lifetime of the pointee
3215 		 * itself (the timing_generator object) is not a problem here.
3216 		 */
3217 		struct timing_generator *tg = pipe_ctx[i]->stream_res.tg;
3218 
3219 		if ((tg != NULL) && tg->funcs) {
3220 			if (tg->funcs->set_drr)
3221 				tg->funcs->set_drr(tg, &params);
3222 			if (adjust.v_total_max != 0 && adjust.v_total_min != 0)
3223 				if (tg->funcs->set_static_screen_control)
3224 					tg->funcs->set_static_screen_control(
3225 						tg, event_triggers, num_frames);
3226 		}
3227 	}
3228 }
3229 
3230 void dcn10_get_position(struct pipe_ctx **pipe_ctx,
3231 		int num_pipes,
3232 		struct crtc_position *position)
3233 {
3234 	int i = 0;
3235 
3236 	/* TODO: handle pipes > 1
3237 	 */
3238 	for (i = 0; i < num_pipes; i++)
3239 		pipe_ctx[i]->stream_res.tg->funcs->get_position(pipe_ctx[i]->stream_res.tg, position);
3240 }
3241 
3242 void dcn10_set_static_screen_control(struct pipe_ctx **pipe_ctx,
3243 		int num_pipes, const struct dc_static_screen_params *params)
3244 {
3245 	unsigned int i;
3246 	unsigned int triggers = 0;
3247 
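	/*
	 * Build the OTG static screen event mask from the DC trigger flags;
	 * the bit positions below (surface update, cursor update, force
	 * trigger) are consumed by tg->funcs->set_static_screen_control().
	 */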
3248 	if (params->triggers.surface_update)
3249 		triggers |= 0x80;
3250 	if (params->triggers.cursor_update)
3251 		triggers |= 0x2;
3252 	if (params->triggers.force_trigger)
3253 		triggers |= 0x1;
3254 
3255 	for (i = 0; i < num_pipes; i++)
3256 		pipe_ctx[i]->stream_res.tg->funcs->
3257 			set_static_screen_control(pipe_ctx[i]->stream_res.tg,
3258 					triggers, params->num_frames);
3259 }
3260 
3261 static void dcn10_config_stereo_parameters(
3262 		struct dc_stream_state *stream, struct crtc_stereo_flags *flags)
3263 {
3264 	enum view_3d_format view_format = stream->view_format;
3265 	enum dc_timing_3d_format timing_3d_format =\
3266 			stream->timing.timing_3d_format;
3267 	bool non_stereo_timing = false;
3268 
3269 	if (timing_3d_format == TIMING_3D_FORMAT_NONE ||
3270 		timing_3d_format == TIMING_3D_FORMAT_SIDE_BY_SIDE ||
3271 		timing_3d_format == TIMING_3D_FORMAT_TOP_AND_BOTTOM)
3272 		non_stereo_timing = true;
3273 
3274 	if (non_stereo_timing == false &&
3275 		view_format == VIEW_3D_FORMAT_FRAME_SEQUENTIAL) {
3276 
3277 		flags->PROGRAM_STEREO         = 1;
3278 		flags->PROGRAM_POLARITY       = 1;
3279 		if (timing_3d_format == TIMING_3D_FORMAT_FRAME_ALTERNATE ||
3280 			timing_3d_format == TIMING_3D_FORMAT_INBAND_FA ||
3281 			timing_3d_format == TIMING_3D_FORMAT_DP_HDMI_INBAND_FA ||
3282 			timing_3d_format == TIMING_3D_FORMAT_SIDEBAND_FA) {
3283 
3284 			if (stream->link && stream->link->ddc) {
3285 				enum display_dongle_type dongle = \
3286 						stream->link->ddc->dongle_type;
3287 
3288 				if (dongle == DISPLAY_DONGLE_DP_VGA_CONVERTER ||
3289 					dongle == DISPLAY_DONGLE_DP_DVI_CONVERTER ||
3290 					dongle == DISPLAY_DONGLE_DP_HDMI_CONVERTER)
3291 					flags->DISABLE_STEREO_DP_SYNC = 1;
3292 			}
3293 		}
3294 		flags->RIGHT_EYE_POLARITY =\
3295 				stream->timing.flags.RIGHT_EYE_3D_POLARITY;
3296 		if (timing_3d_format == TIMING_3D_FORMAT_HW_FRAME_PACKING)
3297 			flags->FRAME_PACKED = 1;
3298 	}
3299 
3300 	return;
3301 }
3302 
3303 void dcn10_setup_stereo(struct pipe_ctx *pipe_ctx, struct dc *dc)
3304 {
3305 	struct crtc_stereo_flags flags = { 0 };
3306 	struct dc_stream_state *stream = pipe_ctx->stream;
3307 
3308 	dcn10_config_stereo_parameters(stream, &flags);
3309 
3310 	if (stream->timing.timing_3d_format == TIMING_3D_FORMAT_SIDEBAND_FA) {
3311 		if (!dc_set_generic_gpio_for_stereo(true, dc->ctx->gpio_service))
3312 			dc_set_generic_gpio_for_stereo(false, dc->ctx->gpio_service);
3313 	} else {
3314 		dc_set_generic_gpio_for_stereo(false, dc->ctx->gpio_service);
3315 	}
3316 
3317 	pipe_ctx->stream_res.opp->funcs->opp_program_stereo(
3318 		pipe_ctx->stream_res.opp,
3319 		flags.PROGRAM_STEREO == 1,
3320 		&stream->timing);
3321 
3322 	pipe_ctx->stream_res.tg->funcs->program_stereo(
3323 		pipe_ctx->stream_res.tg,
3324 		&stream->timing,
3325 		&flags);
3326 
3327 	return;
3328 }
3329 
3330 static struct hubp *get_hubp_by_inst(struct resource_pool *res_pool, int mpcc_inst)
3331 {
3332 	int i;
3333 
3334 	for (i = 0; i < res_pool->pipe_count; i++) {
3335 		if (res_pool->hubps[i]->inst == mpcc_inst)
3336 			return res_pool->hubps[i];
3337 	}
3338 	ASSERT(false);
3339 	return NULL;
3340 }
3341 
3342 void dcn10_wait_for_mpcc_disconnect(
3343 		struct dc *dc,
3344 		struct resource_pool *res_pool,
3345 		struct pipe_ctx *pipe_ctx)
3346 {
3347 	struct dce_hwseq *hws = dc->hwseq;
3348 	int mpcc_inst;
3349 
3350 	if (dc->debug.sanity_checks) {
3351 		hws->funcs.verify_allow_pstate_change_high(dc);
3352 	}
3353 
3354 	if (!pipe_ctx->stream_res.opp)
3355 		return;
3356 
3357 	for (mpcc_inst = 0; mpcc_inst < MAX_PIPES; mpcc_inst++) {
3358 		if (pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst]) {
3359 			struct hubp *hubp = get_hubp_by_inst(res_pool, mpcc_inst);
3360 
3361 			if (pipe_ctx->stream_res.tg &&
3362 				pipe_ctx->stream_res.tg->funcs->is_tg_enabled(pipe_ctx->stream_res.tg))
3363 				res_pool->mpc->funcs->wait_for_idle(res_pool->mpc, mpcc_inst);
3364 			pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst] = false;
3365 			hubp->funcs->set_blank(hubp, true);
3366 		}
3367 	}
3368 
3369 	if (dc->debug.sanity_checks) {
3370 		hws->funcs.verify_allow_pstate_change_high(dc);
3371 	}
3372 
3373 }
3374 
3375 bool dcn10_dummy_display_power_gating(
3376 	struct dc *dc,
3377 	uint8_t controller_id,
3378 	struct dc_bios *dcb,
3379 	enum pipe_gating_control power_gating)
3380 {
3381 	return true;
3382 }
3383 
3384 void dcn10_update_pending_status(struct pipe_ctx *pipe_ctx)
3385 {
3386 	struct dc_plane_state *plane_state = pipe_ctx->plane_state;
3387 	struct timing_generator *tg = pipe_ctx->stream_res.tg;
3388 	bool flip_pending;
3389 	struct dc *dc = pipe_ctx->stream->ctx->dc;
3390 
3391 	if (plane_state == NULL)
3392 		return;
3393 
3394 	flip_pending = pipe_ctx->plane_res.hubp->funcs->hubp_is_flip_pending(
3395 					pipe_ctx->plane_res.hubp);
3396 
3397 	plane_state->status.is_flip_pending = plane_state->status.is_flip_pending || flip_pending;
3398 
3399 	if (!flip_pending)
3400 		plane_state->status.current_address = plane_state->status.requested_address;
3401 
3402 	if (plane_state->status.current_address.type == PLN_ADDR_TYPE_GRPH_STEREO &&
3403 			tg->funcs->is_stereo_left_eye) {
3404 		plane_state->status.is_right_eye =
3405 				!tg->funcs->is_stereo_left_eye(pipe_ctx->stream_res.tg);
3406 	}
3407 
3408 	if (dc->hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied) {
3409 		struct dce_hwseq *hwseq = dc->hwseq;
3410 		struct timing_generator *tg = dc->res_pool->timing_generators[0];
3411 		unsigned int cur_frame = tg->funcs->get_frame_count(tg);
3412 
3413 		if (cur_frame != hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied_on_frame) {
3414 			struct hubbub *hubbub = dc->res_pool->hubbub;
3415 
3416 			hubbub->funcs->allow_self_refresh_control(hubbub, !dc->debug.disable_stutter);
3417 			hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied = false;
3418 		}
3419 	}
3420 }
3421 
3422 void dcn10_update_dchub(struct dce_hwseq *hws, struct dchub_init_data *dh_data)
3423 {
3424 	struct hubbub *hubbub = hws->ctx->dc->res_pool->hubbub;
3425 
3426 	/* In DCN, this programming sequence is owned by the hubbub */
3427 	hubbub->funcs->update_dchub(hubbub, dh_data);
3428 }
3429 
3430 static bool dcn10_can_pipe_disable_cursor(struct pipe_ctx *pipe_ctx)
3431 {
3432 	struct pipe_ctx *test_pipe, *split_pipe;
3433 	const struct scaler_data *scl_data = &pipe_ctx->plane_res.scl_data;
3434 	struct rect r1 = scl_data->recout, r2, r2_half;
3435 	int r1_r = r1.x + r1.width, r1_b = r1.y + r1.height, r2_r, r2_b;
3436 	int cur_layer = pipe_ctx->plane_state->layer_index;
3437 
3438 	/**
3439 	 * Disable the cursor if there's another pipe above this with a
3440 	 * plane that contains this pipe's viewport to prevent double cursor
3441 	 * and incorrect scaling artifacts.
3442 	 */
3443 	for (test_pipe = pipe_ctx->top_pipe; test_pipe;
3444 	     test_pipe = test_pipe->top_pipe) {
3445 		// Skip invisible layer and pipe-split plane on same layer
3446 		if (!test_pipe->plane_state ||
3447 		    !test_pipe->plane_state->visible ||
3448 		    test_pipe->plane_state->layer_index == cur_layer)
3449 			continue;
3450 
3451 		r2 = test_pipe->plane_res.scl_data.recout;
3452 		r2_r = r2.x + r2.width;
3453 		r2_b = r2.y + r2.height;
3454 
3455 		/**
3456 		 * There is another half plane on same layer because of
3457 		 * pipe-split, merge together per same height.
3458 		 */
3459 		for (split_pipe = pipe_ctx->top_pipe; split_pipe;
3460 		     split_pipe = split_pipe->top_pipe)
3461 			if (split_pipe->plane_state->layer_index == test_pipe->plane_state->layer_index) {
3462 				r2_half = split_pipe->plane_res.scl_data.recout;
3463 				r2.x = (r2_half.x < r2.x) ? r2_half.x : r2.x;
3464 				r2.width = r2.width + r2_half.width;
3465 				r2_r = r2.x + r2.width;
3466 				break;
3467 			}
3468 
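		/*
		 * The cursor on this pipe is redundant only when its recout
		 * (r1) is fully contained within the merged recout (r2) of the
		 * overlapping plane checked below.
		 */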
3469 		if (r1.x >= r2.x && r1.y >= r2.y && r1_r <= r2_r && r1_b <= r2_b)
3470 			return true;
3471 	}
3472 
3473 	return false;
3474 }
3475 
3476 void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
3477 {
3478 	struct dc_cursor_position pos_cpy = pipe_ctx->stream->cursor_position;
3479 	struct hubp *hubp = pipe_ctx->plane_res.hubp;
3480 	struct dpp *dpp = pipe_ctx->plane_res.dpp;
3481 	struct dc_cursor_mi_param param = {
3482 		.pixel_clk_khz = pipe_ctx->stream->timing.pix_clk_100hz / 10,
3483 		.ref_clk_khz = pipe_ctx->stream->ctx->dc->res_pool->ref_clocks.dchub_ref_clock_inKhz,
3484 		.viewport = pipe_ctx->plane_res.scl_data.viewport,
3485 		.h_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.horz,
3486 		.v_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.vert,
3487 		.rotation = pipe_ctx->plane_state->rotation,
3488 		.mirror = pipe_ctx->plane_state->horizontal_mirror,
3489 		.stream = pipe_ctx->stream,
3490 	};
3491 	bool pipe_split_on = false;
3492 	bool odm_combine_on = (pipe_ctx->next_odm_pipe != NULL) ||
3493 		(pipe_ctx->prev_odm_pipe != NULL);
3494 
3495 	int x_plane = pipe_ctx->plane_state->dst_rect.x;
3496 	int y_plane = pipe_ctx->plane_state->dst_rect.y;
3497 	int x_pos = pos_cpy.x;
3498 	int y_pos = pos_cpy.y;
3499 
3500 	if ((pipe_ctx->top_pipe != NULL) || (pipe_ctx->bottom_pipe != NULL)) {
3501 		if ((pipe_ctx->plane_state->src_rect.width != pipe_ctx->plane_res.scl_data.viewport.width) ||
3502 			(pipe_ctx->plane_state->src_rect.height != pipe_ctx->plane_res.scl_data.viewport.height)) {
3503 			pipe_split_on = true;
3504 		}
3505 	}
3506 
3507 	/**
3508 	 * DC cursor is stream space, HW cursor is plane space and drawn
3509 	 * as part of the framebuffer.
3510 	 *
3511 	 * Cursor position can't be negative, but hotspot can be used to
3512 	 * shift cursor out of the plane bounds. Hotspot must be smaller
3513 	 * than the cursor size.
3514 	 */
3515 
3516 	/**
3517 	 * Translate cursor from stream space to plane space.
3518 	 *
3519 	 * If the cursor is scaled then we need to scale the position
3520 	 * to be in the approximately correct place. We can't do anything
3521 	 * about the actual size being incorrect, that's a limitation of
3522 	 * the hardware.
3523 	 */
3524 	if (param.rotation == ROTATION_ANGLE_90 || param.rotation == ROTATION_ANGLE_270) {
3525 		x_pos = (x_pos - x_plane) * pipe_ctx->plane_state->src_rect.height /
3526 				pipe_ctx->plane_state->dst_rect.width;
3527 		y_pos = (y_pos - y_plane) * pipe_ctx->plane_state->src_rect.width /
3528 				pipe_ctx->plane_state->dst_rect.height;
3529 	} else {
3530 		x_pos = (x_pos - x_plane) * pipe_ctx->plane_state->src_rect.width /
3531 				pipe_ctx->plane_state->dst_rect.width;
3532 		y_pos = (y_pos - y_plane) * pipe_ctx->plane_state->src_rect.height /
3533 				pipe_ctx->plane_state->dst_rect.height;
3534 	}
3535 
3536 	/**
3537 	 * If the cursor's source viewport is clipped then we need to
3538 	 * translate the cursor to appear in the correct position on
3539 	 * the screen.
3540 	 *
3541 	 * This translation isn't affected by scaling so it needs to be
3542 	 * done *after* we adjust the position for the scale factor.
3543 	 *
3544 	 * This is only done by opt-in for now since there are still
3545 	 * some usecases like tiled display that might enable the
3546 	 * cursor on both streams while expecting dc to clip it.
3547 	 */
3548 	if (pos_cpy.translate_by_source) {
3549 		x_pos += pipe_ctx->plane_state->src_rect.x;
3550 		y_pos += pipe_ctx->plane_state->src_rect.y;
3551 	}
3552 
3553 	/**
3554 	 * If the position is negative then we need to add to the hotspot
3555 	 * to shift the cursor outside the plane.
3556 	 */
3557 
3558 	if (x_pos < 0) {
3559 		pos_cpy.x_hotspot -= x_pos;
3560 		x_pos = 0;
3561 	}
3562 
3563 	if (y_pos < 0) {
3564 		pos_cpy.y_hotspot -= y_pos;
3565 		y_pos = 0;
3566 	}
3567 
3568 	pos_cpy.x = (uint32_t)x_pos;
3569 	pos_cpy.y = (uint32_t)y_pos;
3570 
3571 	if (pipe_ctx->plane_state->address.type
3572 			== PLN_ADDR_TYPE_VIDEO_PROGRESSIVE)
3573 		pos_cpy.enable = false;
3574 
3575 	if (pos_cpy.enable && dcn10_can_pipe_disable_cursor(pipe_ctx))
3576 		pos_cpy.enable = false;
3577 
3578 
3579 	if (param.rotation == ROTATION_ANGLE_0) {
3580 		int viewport_width =
3581 			pipe_ctx->plane_res.scl_data.viewport.width;
3582 		int viewport_x =
3583 			pipe_ctx->plane_res.scl_data.viewport.x;
3584 
3585 		if (param.mirror) {
3586 			if (pipe_split_on || odm_combine_on) {
3587 				if (pos_cpy.x >= viewport_width + viewport_x) {
3588 					pos_cpy.x = 2 * viewport_width
3589 							- pos_cpy.x + 2 * viewport_x;
3590 				} else {
3591 					uint32_t temp_x = pos_cpy.x;
3592 
3593 					pos_cpy.x = 2 * viewport_x - pos_cpy.x;
3594 					if (temp_x >= viewport_x +
3595 						(int)hubp->curs_attr.width || pos_cpy.x
3596 						<= (int)hubp->curs_attr.width +
3597 						pipe_ctx->plane_state->src_rect.x) {
3598 						pos_cpy.x = 2 * viewport_width - temp_x;
3599 					}
3600 				}
3601 			} else {
3602 				pos_cpy.x = viewport_width - pos_cpy.x + 2 * viewport_x;
3603 			}
3604 		}
3605 	}
3606 	// Swap axis and mirror horizontally
3607 	else if (param.rotation == ROTATION_ANGLE_90) {
3608 		uint32_t temp_x = pos_cpy.x;
3609 
3610 		pos_cpy.x = pipe_ctx->plane_res.scl_data.viewport.width -
3611 				(pos_cpy.y - pipe_ctx->plane_res.scl_data.viewport.x) + pipe_ctx->plane_res.scl_data.viewport.x;
3612 		pos_cpy.y = temp_x;
3613 	}
3614 	// Swap axis and mirror vertically
3615 	else if (param.rotation == ROTATION_ANGLE_270) {
3616 		uint32_t temp_y = pos_cpy.y;
3617 		int viewport_height =
3618 			pipe_ctx->plane_res.scl_data.viewport.height;
3619 		int viewport_y =
3620 			pipe_ctx->plane_res.scl_data.viewport.y;
3621 
3622 		/**
3623 		 * Display groups that are 1xnY have pos_cpy.x > 2 * viewport.height.
3624 		 * For pipe split cases:
3625 		 * - apply offset of viewport.y to normalize pos_cpy.x
3626 		 * - calculate the pos_cpy.y as before
3627 		 * - shift pos_cpy.y back by same offset to get final value
3628 		 * - since we iterate through both pipes, use the lower
3629 		 *   viewport.y for offset
3630 		 * For non pipe split cases, use the same calculation for
3631 		 *  pos_cpy.y as the 180 degree rotation case below,
3632 		 *  but use pos_cpy.x as our input because we are rotating
3633 		 *  270 degrees
3634 		 */
3635 		if (pipe_split_on || odm_combine_on) {
3636 			int pos_cpy_x_offset;
3637 			int other_pipe_viewport_y;
3638 
3639 			if (pipe_split_on) {
3640 				if (pipe_ctx->bottom_pipe) {
3641 					other_pipe_viewport_y =
3642 						pipe_ctx->bottom_pipe->plane_res.scl_data.viewport.y;
3643 				} else {
3644 					other_pipe_viewport_y =
3645 						pipe_ctx->top_pipe->plane_res.scl_data.viewport.y;
3646 				}
3647 			} else {
3648 				if (pipe_ctx->next_odm_pipe) {
3649 					other_pipe_viewport_y =
3650 						pipe_ctx->next_odm_pipe->plane_res.scl_data.viewport.y;
3651 				} else {
3652 					other_pipe_viewport_y =
3653 						pipe_ctx->prev_odm_pipe->plane_res.scl_data.viewport.y;
3654 				}
3655 			}
3656 			pos_cpy_x_offset = (viewport_y > other_pipe_viewport_y) ?
3657 				other_pipe_viewport_y : viewport_y;
3658 			pos_cpy.x -= pos_cpy_x_offset;
3659 			if (pos_cpy.x > viewport_height) {
3660 				pos_cpy.x = pos_cpy.x - viewport_height;
3661 				pos_cpy.y = viewport_height - pos_cpy.x;
3662 			} else {
3663 				pos_cpy.y = 2 * viewport_height - pos_cpy.x;
3664 			}
3665 			pos_cpy.y += pos_cpy_x_offset;
3666 		} else {
3667 			pos_cpy.y = (2 * viewport_y) + viewport_height - pos_cpy.x;
3668 		}
3669 		pos_cpy.x = temp_y;
3670 	}
3671 	// Mirror horizontally and vertically
3672 	else if (param.rotation == ROTATION_ANGLE_180) {
3673 		int viewport_width =
3674 			pipe_ctx->plane_res.scl_data.viewport.width;
3675 		int viewport_x =
3676 			pipe_ctx->plane_res.scl_data.viewport.x;
3677 
3678 		if (!param.mirror) {
3679 			if (pipe_split_on || odm_combine_on) {
3680 				if (pos_cpy.x >= viewport_width + viewport_x) {
3681 					pos_cpy.x = 2 * viewport_width
3682 							- pos_cpy.x + 2 * viewport_x;
3683 				} else {
3684 					uint32_t temp_x = pos_cpy.x;
3685 
3686 					pos_cpy.x = 2 * viewport_x - pos_cpy.x;
3687 					if (temp_x >= viewport_x +
3688 						(int)hubp->curs_attr.width || pos_cpy.x
3689 						<= (int)hubp->curs_attr.width +
3690 						pipe_ctx->plane_state->src_rect.x) {
3691 						pos_cpy.x = temp_x + viewport_width;
3692 					}
3693 				}
3694 			} else {
3695 				pos_cpy.x = viewport_width - pos_cpy.x + 2 * viewport_x;
3696 			}
3697 		}
3698 
3699 		/**
3700 		 * Display groups that are 1xnY have pos_cpy.y > viewport.height.
3701 		 * Calculation:
3702 		 *   delta_from_bottom = viewport.y + viewport.height - pos_cpy.y
3703 		 *   pos_cpy.y_new = viewport.y + delta_from_bottom
3704 		 * Simplify it as:
3705 		 *   pos_cpy.y = viewport.y * 2 + viewport.height - pos_cpy.y
3706 		 */
3707 		pos_cpy.y = (2 * pipe_ctx->plane_res.scl_data.viewport.y) +
3708 			pipe_ctx->plane_res.scl_data.viewport.height - pos_cpy.y;
3709 	}
3710 
3711 	hubp->funcs->set_cursor_position(hubp, &pos_cpy, &param);
3712 	dpp->funcs->set_cursor_position(dpp, &pos_cpy, &param, hubp->curs_attr.width, hubp->curs_attr.height);
3713 }
3714 
3715 void dcn10_set_cursor_attribute(struct pipe_ctx *pipe_ctx)
3716 {
3717 	struct dc_cursor_attributes *attributes = &pipe_ctx->stream->cursor_attributes;
3718 
3719 	pipe_ctx->plane_res.hubp->funcs->set_cursor_attributes(
3720 			pipe_ctx->plane_res.hubp, attributes);
3721 	pipe_ctx->plane_res.dpp->funcs->set_cursor_attributes(
3722 		pipe_ctx->plane_res.dpp, attributes);
3723 }
3724 
3725 void dcn10_set_cursor_sdr_white_level(struct pipe_ctx *pipe_ctx)
3726 {
3727 	uint32_t sdr_white_level = pipe_ctx->stream->cursor_attributes.sdr_white_level;
3728 	struct fixed31_32 multiplier;
3729 	struct dpp_cursor_attributes opt_attr = { 0 };
3730 	uint32_t hw_scale = 0x3c00; // 1.0 default multiplier
3731 	struct custom_float_format fmt;
3732 
3733 	if (!pipe_ctx->plane_res.dpp->funcs->set_optional_cursor_attributes)
3734 		return;
3735 
3736 	fmt.exponenta_bits = 5;
3737 	fmt.mantissa_bits = 10;
3738 	fmt.sign = true;
3739 
3740 	if (sdr_white_level > 80) {
3741 		multiplier = dc_fixpt_from_fraction(sdr_white_level, 80);
3742 		convert_to_custom_float_format(multiplier, &fmt, &hw_scale);
3743 	}
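	/*
	 * hw_scale uses a 1-bit sign, 5-bit exponent (bias 15), 10-bit mantissa
	 * layout, so the 0x3c00 default above is 1.0. For example,
	 * sdr_white_level = 160 nits gives 160/80 = 2.0, i.e. 0x4000.
	 */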
3744 
3745 	opt_attr.scale = hw_scale;
3746 	opt_attr.bias = 0;
3747 
3748 	pipe_ctx->plane_res.dpp->funcs->set_optional_cursor_attributes(
3749 			pipe_ctx->plane_res.dpp, &opt_attr);
3750 }
3751 
3752 /*
3753  * apply_front_porch_workaround  TODO: still needed for FPGA?
3754  *
3755  * This is a workaround for a bug that has existed since R5xx and has not
3756  * been fixed: keep the front porch at a minimum of 2 for interlaced mode or 1 for progressive.
3757  */
3758 static void apply_front_porch_workaround(
3759 	struct dc_crtc_timing *timing)
3760 {
3761 	if (timing->flags.INTERLACE == 1) {
3762 		if (timing->v_front_porch < 2)
3763 			timing->v_front_porch = 2;
3764 	} else {
3765 		if (timing->v_front_porch < 1)
3766 			timing->v_front_porch = 1;
3767 	}
3768 }
3769 
3770 int dcn10_get_vupdate_offset_from_vsync(struct pipe_ctx *pipe_ctx)
3771 {
3772 	const struct dc_crtc_timing *dc_crtc_timing = &pipe_ctx->stream->timing;
3773 	struct dc_crtc_timing patched_crtc_timing;
3774 	int vesa_sync_start;
3775 	int asic_blank_end;
3776 	int interlace_factor;
3777 
3778 	patched_crtc_timing = *dc_crtc_timing;
3779 	apply_front_porch_workaround(&patched_crtc_timing);
3780 
3781 	interlace_factor = patched_crtc_timing.flags.INTERLACE ? 2 : 1;
3782 
3783 	vesa_sync_start = patched_crtc_timing.v_addressable +
3784 			patched_crtc_timing.v_border_bottom +
3785 			patched_crtc_timing.v_front_porch;
3786 
3787 	asic_blank_end = (patched_crtc_timing.v_total -
3788 			vesa_sync_start -
3789 			patched_crtc_timing.v_border_top)
3790 			* interlace_factor;
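	/*
	 * Worked example with made-up 1080p-like numbers: v_addressable = 1080,
	 * v_border_bottom = 0 and v_front_porch = 4 give vesa_sync_start = 1084;
	 * with v_total = 1125, v_border_top = 0 and progressive timing the
	 * blank end lands at (1125 - 1084 - 0) * 1 = 41 lines.
	 */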
3791 
3792 	return asic_blank_end -
3793 			pipe_ctx->pipe_dlg_param.vstartup_start + 1;
3794 }
3795 
3796 void dcn10_calc_vupdate_position(
3797 		struct dc *dc,
3798 		struct pipe_ctx *pipe_ctx,
3799 		uint32_t *start_line,
3800 		uint32_t *end_line)
3801 {
3802 	const struct dc_crtc_timing *timing = &pipe_ctx->stream->timing;
3803 	int vupdate_pos = dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx);
3804 
3805 	if (vupdate_pos >= 0)
3806 		*start_line = vupdate_pos - ((vupdate_pos / timing->v_total) * timing->v_total);
3807 	else
3808 		*start_line = vupdate_pos + ((-vupdate_pos / timing->v_total) + 1) * timing->v_total - 1;
3809 	*end_line = (*start_line + 2) % timing->v_total;
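	/*
	 * Both branches above fold vupdate_pos into the current frame: for
	 * non-negative values this is simply vupdate_pos % v_total, while
	 * negative values are wrapped upward (less one line). end_line then
	 * trails start_line by two lines, modulo v_total.
	 */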
3810 }
3811 
3812 static void dcn10_cal_vline_position(
3813 		struct dc *dc,
3814 		struct pipe_ctx *pipe_ctx,
3815 		uint32_t *start_line,
3816 		uint32_t *end_line)
3817 {
3818 	const struct dc_crtc_timing *timing = &pipe_ctx->stream->timing;
3819 	int vline_pos = pipe_ctx->stream->periodic_interrupt.lines_offset;
3820 
3821 	if (pipe_ctx->stream->periodic_interrupt.ref_point == START_V_UPDATE) {
3822 		if (vline_pos > 0)
3823 			vline_pos--;
3824 		else if (vline_pos < 0)
3825 			vline_pos++;
3826 
3827 		vline_pos += dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx);
3828 		if (vline_pos >= 0)
3829 			*start_line = vline_pos - ((vline_pos / timing->v_total) * timing->v_total);
3830 		else
3831 			*start_line = vline_pos + ((-vline_pos / timing->v_total) + 1) * timing->v_total - 1;
3832 		*end_line = (*start_line + 2) % timing->v_total;
3833 	} else if (pipe_ctx->stream->periodic_interrupt.ref_point == START_V_SYNC) {
3834 		// vsync is line 0 so start_line is just the requested line offset
3835 		*start_line = vline_pos;
3836 		*end_line = (*start_line + 2) % timing->v_total;
3837 	} else
3838 		ASSERT(0);
3839 }
3840 
3841 void dcn10_setup_periodic_interrupt(
3842 		struct dc *dc,
3843 		struct pipe_ctx *pipe_ctx)
3844 {
3845 	struct timing_generator *tg = pipe_ctx->stream_res.tg;
3846 	uint32_t start_line = 0;
3847 	uint32_t end_line = 0;
3848 
3849 	dcn10_cal_vline_position(dc, pipe_ctx, &start_line, &end_line);
3850 
3851 	tg->funcs->setup_vertical_interrupt0(tg, start_line, end_line);
3852 }
3853 
3854 void dcn10_setup_vupdate_interrupt(struct dc *dc, struct pipe_ctx *pipe_ctx)
3855 {
3856 	struct timing_generator *tg = pipe_ctx->stream_res.tg;
3857 	int start_line = dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx);
3858 
3859 	if (start_line < 0) {
3860 		ASSERT(0);
3861 		start_line = 0;
3862 	}
3863 
3864 	if (tg->funcs->setup_vertical_interrupt2)
3865 		tg->funcs->setup_vertical_interrupt2(tg, start_line);
3866 }
3867 
3868 void dcn10_unblank_stream(struct pipe_ctx *pipe_ctx,
3869 		struct dc_link_settings *link_settings)
3870 {
3871 	struct encoder_unblank_param params = {0};
3872 	struct dc_stream_state *stream = pipe_ctx->stream;
3873 	struct dc_link *link = stream->link;
3874 	struct dce_hwseq *hws = link->dc->hwseq;
3875 
3876 	/* only the params below are used by unblank */
3877 	params.timing = pipe_ctx->stream->timing;
3878 
3879 	params.link_settings.link_rate = link_settings->link_rate;
3880 
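	/*
	 * For DP, a 4:2:0 timing is driven at half the pixel rate (presumably
	 * because the encoder processes two pixels per clock for 4:2:0), hence
	 * the pix_clk halving below before unblanking.
	 */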
3881 	if (dc_is_dp_signal(pipe_ctx->stream->signal)) {
3882 		if (params.timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
3883 			params.timing.pix_clk_100hz /= 2;
3884 		pipe_ctx->stream_res.stream_enc->funcs->dp_unblank(link, pipe_ctx->stream_res.stream_enc, &params);
3885 	}
3886 
3887 	if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP) {
3888 		hws->funcs.edp_backlight_control(link, true);
3889 	}
3890 }
3891 
3892 void dcn10_send_immediate_sdp_message(struct pipe_ctx *pipe_ctx,
3893 				const uint8_t *custom_sdp_message,
3894 				unsigned int sdp_message_size)
3895 {
3896 	if (dc_is_dp_signal(pipe_ctx->stream->signal)) {
3897 		pipe_ctx->stream_res.stream_enc->funcs->send_immediate_sdp_message(
3898 				pipe_ctx->stream_res.stream_enc,
3899 				custom_sdp_message,
3900 				sdp_message_size);
3901 	}
3902 }
3903 enum dc_status dcn10_set_clock(struct dc *dc,
3904 			enum dc_clock_type clock_type,
3905 			uint32_t clk_khz,
3906 			uint32_t stepping)
3907 {
3908 	struct dc_state *context = dc->current_state;
3909 	struct dc_clock_config clock_cfg = {0};
3910 	struct dc_clocks *current_clocks = &context->bw_ctx.bw.dcn.clk;
3911 
3912 	if (!dc->clk_mgr || !dc->clk_mgr->funcs->get_clock)
3913 		return DC_FAIL_UNSUPPORTED_1;
3914 
3915 	dc->clk_mgr->funcs->get_clock(dc->clk_mgr,
3916 		context, clock_type, &clock_cfg);
3917 
3918 	if (clk_khz > clock_cfg.max_clock_khz)
3919 		return DC_FAIL_CLK_EXCEED_MAX;
3920 
3921 	if (clk_khz < clock_cfg.min_clock_khz)
3922 		return DC_FAIL_CLK_BELOW_MIN;
3923 
3924 	if (clk_khz < clock_cfg.bw_requirequired_clock_khz)
3925 		return DC_FAIL_CLK_BELOW_CFG_REQUIRED;
3926 
3927 	/*update internal request clock for update clock use*/
3928 	if (clock_type == DC_CLOCK_TYPE_DISPCLK)
3929 		current_clocks->dispclk_khz = clk_khz;
3930 	else if (clock_type == DC_CLOCK_TYPE_DPPCLK)
3931 		current_clocks->dppclk_khz = clk_khz;
3932 	else
3933 		return DC_ERROR_UNEXPECTED;
3934 
3935 	if (dc->clk_mgr->funcs->update_clocks)
3936 		dc->clk_mgr->funcs->update_clocks(dc->clk_mgr,
3937 				context, true);
3938 	return DC_OK;
3939 
3940 }
3941 
3942 void dcn10_get_clock(struct dc *dc,
3943 			enum dc_clock_type clock_type,
3944 			struct dc_clock_config *clock_cfg)
3945 {
3946 	struct dc_state *context = dc->current_state;
3947 
3948 	if (dc->clk_mgr && dc->clk_mgr->funcs->get_clock)
3949 		dc->clk_mgr->funcs->get_clock(dc->clk_mgr, context, clock_type, clock_cfg);
3950 
3951 }
3952 
3953 void dcn10_get_dcc_en_bits(struct dc *dc, int *dcc_en_bits)
3954 {
3955 	struct resource_pool *pool = dc->res_pool;
3956 	int i;
3957 
3958 	for (i = 0; i < pool->pipe_count; i++) {
3959 		struct hubp *hubp = pool->hubps[i];
3960 		struct dcn_hubp_state *s = &(TO_DCN10_HUBP(hubp)->state);
3961 
3962 		hubp->funcs->hubp_read_state(hubp);
3963 
3964 		if (!s->blank_en)
3965 			dcc_en_bits[i] = s->dcc_en ? 1 : 0;
3966 	}
3967 }
3968