// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013 Red Hat
 * Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
 * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Author: Rob Clark <[email protected]>
 */

#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__

#include <linux/debugfs.h>
#include <linux/dma-buf.h>
#include <linux/of_irq.h>
#include <linux/pm_opp.h>

#include <drm/drm_crtc.h>
#include <drm/drm_file.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_vblank.h>
#include <drm/drm_writeback.h>

#include "msm_drv.h"
#include "msm_mmu.h"
#include "msm_mdss.h"
#include "msm_gem.h"
#include "disp/msm_disp_snapshot.h"

#include "dpu_core_irq.h"
#include "dpu_crtc.h"
#include "dpu_encoder.h"
#include "dpu_formats.h"
#include "dpu_hw_vbif.h"
#include "dpu_kms.h"
#include "dpu_plane.h"
#include "dpu_vbif.h"
#include "dpu_writeback.h"

#define CREATE_TRACE_POINTS
#include "dpu_trace.h"

/*
 * To enable overall DRM driver logging
 * # echo 0x2 > /sys/module/drm/parameters/debug
 *
 * To enable DRM driver h/w logging
 * # echo <mask> > /sys/kernel/debug/dri/0/debug/hw_log_mask
 *
 * See dpu_hw_mdss.h for h/w logging mask definitions (search for DPU_DBG_MASK_)
 */
#define DPU_DEBUGFS_DIR "msm_dpu"
#define DPU_DEBUGFS_HWMASKNAME "hw_log_mask"

bool dpu_use_virtual_planes;
module_param(dpu_use_virtual_planes, bool, 0);

static int dpu_kms_hw_init(struct msm_kms *kms);
static void _dpu_kms_mmu_destroy(struct dpu_kms *dpu_kms);

#ifdef CONFIG_DEBUG_FS
static int _dpu_danger_signal_status(struct seq_file *s,
		bool danger_status)
{
	struct dpu_danger_safe_status status;
	struct dpu_kms *kms = s->private;
	int i;

	if (!kms->hw_mdp) {
		DPU_ERROR("invalid arg(s)\n");
		return 0;
	}

	memset(&status, 0, sizeof(struct dpu_danger_safe_status));

	pm_runtime_get_sync(&kms->pdev->dev);
	if (danger_status) {
		seq_puts(s, "\nDanger signal status:\n");
		if (kms->hw_mdp->ops.get_danger_status)
			kms->hw_mdp->ops.get_danger_status(kms->hw_mdp,
					&status);
	} else {
		seq_puts(s, "\nSafe signal status:\n");
		if (kms->hw_mdp->ops.get_safe_status)
			kms->hw_mdp->ops.get_safe_status(kms->hw_mdp,
					&status);
	}
	pm_runtime_put_sync(&kms->pdev->dev);

	seq_printf(s, "MDP : 0x%x\n", status.mdp);

	for (i = SSPP_VIG0; i < SSPP_MAX; i++)
		seq_printf(s, "SSPP%d : 0x%x \n", i - SSPP_VIG0,
				status.sspp[i]);
	seq_puts(s, "\n");

	return 0;
}

static int dpu_debugfs_danger_stats_show(struct seq_file *s, void *v)
{
	return _dpu_danger_signal_status(s, true);
}
DEFINE_SHOW_ATTRIBUTE(dpu_debugfs_danger_stats);

static int dpu_debugfs_safe_stats_show(struct seq_file *s, void *v)
{
	return _dpu_danger_signal_status(s, false);
}
DEFINE_SHOW_ATTRIBUTE(dpu_debugfs_safe_stats);

static ssize_t _dpu_plane_danger_read(struct file *file,
			char __user *buff, size_t count, loff_t *ppos)
{
	struct dpu_kms *kms = file->private_data;
	int len;
	char buf[40];

	len = scnprintf(buf, sizeof(buf), "%d\n", !kms->has_danger_ctrl);

	return simple_read_from_buffer(buff, count, ppos, buf, len);
}

static void _dpu_plane_set_danger_state(struct dpu_kms *kms, bool enable)
{
	struct drm_plane *plane;

	drm_for_each_plane(plane, kms->dev) {
		if (plane->fb && plane->state) {
			dpu_plane_danger_signal_ctrl(plane, enable);
			DPU_DEBUG("plane:%d img:%dx%d ",
				plane->base.id, plane->fb->width,
				plane->fb->height);
			DPU_DEBUG("src[%d,%d,%d,%d] dst[%d,%d,%d,%d]\n",
				plane->state->src_x >> 16,
				plane->state->src_y >> 16,
				plane->state->src_w >> 16,
				plane->state->src_h >> 16,
				plane->state->crtc_x, plane->state->crtc_y,
				plane->state->crtc_w, plane->state->crtc_h);
		} else {
			DPU_DEBUG("Inactive plane:%d\n", plane->base.id);
		}
	}
}

static ssize_t _dpu_plane_danger_write(struct file *file,
		    const char __user *user_buf, size_t count, loff_t *ppos)
{
	struct dpu_kms *kms = file->private_data;
	unsigned int disable_panic;
	int ret;

	ret = kstrtouint_from_user(user_buf, count, 0, &disable_panic);
	if (ret)
		return ret;

	if (disable_panic) {
		/* Disable panic signal for all active pipes */
		DPU_DEBUG("Disabling danger:\n");
		_dpu_plane_set_danger_state(kms, false);
		kms->has_danger_ctrl = false;
	} else {
		/* Enable panic signal for all active pipes */
		DPU_DEBUG("Enabling danger:\n");
		kms->has_danger_ctrl = true;
		_dpu_plane_set_danger_state(kms, true);
	}

	return count;
}

static const struct file_operations dpu_plane_danger_enable = {
	.open = simple_open,
	.read = _dpu_plane_danger_read,
	.write = _dpu_plane_danger_write,
};

static void dpu_debugfs_danger_init(struct dpu_kms *dpu_kms,
		struct dentry *parent)
{
	struct dentry *entry = debugfs_create_dir("danger", parent);

	debugfs_create_file("danger_status", 0600, entry,
			dpu_kms, &dpu_debugfs_danger_stats_fops);
	debugfs_create_file("safe_status", 0600, entry,
			dpu_kms, &dpu_debugfs_safe_stats_fops);
	debugfs_create_file("disable_danger", 0600, entry,
			dpu_kms, &dpu_plane_danger_enable);
}

/*
 * Companion structure for dpu_debugfs_create_regset32.
 */
struct dpu_debugfs_regset32 {
	uint32_t offset;
	uint32_t blk_len;
	struct dpu_kms *dpu_kms;
};

static int dpu_regset32_show(struct seq_file *s, void *data)
{
	struct dpu_debugfs_regset32 *regset = s->private;
	struct dpu_kms *dpu_kms = regset->dpu_kms;
	void __iomem *base;
	uint32_t i, addr;

	if (!dpu_kms->mmio)
		return 0;

	base = dpu_kms->mmio + regset->offset;

	/* insert padding spaces, if needed */
	if (regset->offset & 0xF) {
		seq_printf(s, "[%x]", regset->offset & ~0xF);
		for (i = 0; i < (regset->offset & 0xF); i += 4)
			seq_puts(s, "         ");
	}

	pm_runtime_get_sync(&dpu_kms->pdev->dev);

	/* main register output */
	for (i = 0; i < regset->blk_len; i += 4) {
		addr = regset->offset + i;
		if ((addr & 0xF) == 0x0)
			seq_printf(s, i ? "\n[%x]" : "[%x]", addr);
		seq_printf(s, " %08x", readl_relaxed(base + i));
	}
	seq_puts(s, "\n");
	pm_runtime_put_sync(&dpu_kms->pdev->dev);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(dpu_regset32);

/**
 * dpu_debugfs_create_regset32 - Create register read back file for debugfs
 *
 * This function is almost identical to the standard debugfs_create_regset32()
 * function, with the main difference being that a list of register
 * names/offsets does not need to be provided. The 'read' function simply
 * outputs sequential register values over a specified range.
 *
 * @name: File name within debugfs
 * @mode: File mode within debugfs
 * @parent: Parent directory entry within debugfs, can be NULL
 * @offset: sub-block offset
 * @length: sub-block length, in bytes
 * @dpu_kms: pointer to dpu kms structure
 */
void dpu_debugfs_create_regset32(const char *name, umode_t mode,
		void *parent,
		uint32_t offset, uint32_t length, struct dpu_kms *dpu_kms)
{
	struct dpu_debugfs_regset32 *regset;

	if (WARN_ON(!name || !dpu_kms || !length))
		return;

	regset = devm_kzalloc(&dpu_kms->pdev->dev, sizeof(*regset), GFP_KERNEL);
	if (!regset)
		return;

	/* make sure offset is a multiple of 4 */
	regset->offset = round_down(offset, 4);
	regset->blk_len = length;
	regset->dpu_kms = dpu_kms;

	debugfs_create_file(name, mode, parent, regset, &dpu_regset32_fops);
}
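
/*
 * Illustrative usage sketch (hypothetical name and offsets, not a call
 * site in this file): a hardware block could expose a raw dump of 0x100
 * bytes starting at sub-block offset 0x1000 with
 *
 *	dpu_debugfs_create_regset32("block_regs", 0400, parent,
 *				    0x1000, 0x100, dpu_kms);
 *
 * Reading the resulting debugfs file prints the registers four words per
 * line, each line prefixed with its 16-byte-aligned address.
 */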

static void dpu_debugfs_sspp_init(struct dpu_kms *dpu_kms, struct dentry *debugfs_root)
{
	struct dentry *entry = debugfs_create_dir("sspp", debugfs_root);
	int i;

	if (IS_ERR(entry))
		return;

	for (i = SSPP_NONE; i < SSPP_MAX; i++) {
		struct dpu_hw_sspp *hw = dpu_rm_get_sspp(&dpu_kms->rm, i);

		if (!hw)
			continue;

		_dpu_hw_sspp_init_debugfs(hw, dpu_kms, entry);
	}
}

static int dpu_kms_debugfs_init(struct msm_kms *kms, struct drm_minor *minor)
{
	struct dpu_kms *dpu_kms = to_dpu_kms(kms);
	void *p = dpu_hw_util_get_log_mask_ptr();
	struct dentry *entry;

	if (!p)
		return -EINVAL;

	/* Only create a set of debugfs for the primary node, ignore render nodes */
	if (minor->type != DRM_MINOR_PRIMARY)
		return 0;

	entry = debugfs_create_dir("debug", minor->debugfs_root);

	debugfs_create_x32(DPU_DEBUGFS_HWMASKNAME, 0600, entry, p);

	dpu_debugfs_danger_init(dpu_kms, entry);
	dpu_debugfs_vbif_init(dpu_kms, entry);
	dpu_debugfs_core_irq_init(dpu_kms, entry);
	dpu_debugfs_sspp_init(dpu_kms, entry);

	return dpu_core_perf_debugfs_init(dpu_kms, entry);
}
#endif

/* Global/shared object state funcs */

/*
 * This is a helper that returns the private state currently in operation.
 * Note that this would return the "old_state" if called in the atomic check
 * path, and the "new_state" after the atomic swap has been done.
 */
struct dpu_global_state *
dpu_kms_get_existing_global_state(struct dpu_kms *dpu_kms)
{
	return to_dpu_global_state(dpu_kms->global_state.state);
}

/*
 * This acquires the modeset lock set aside for global state and creates
 * a duplicated private object state.
 */
struct dpu_global_state *dpu_kms_get_global_state(struct drm_atomic_state *s)
{
	struct msm_drm_private *priv = s->dev->dev_private;
	struct dpu_kms *dpu_kms = to_dpu_kms(priv->kms);
	struct drm_private_state *priv_state;

	priv_state = drm_atomic_get_private_obj_state(s,
				&dpu_kms->global_state);
	if (IS_ERR(priv_state))
		return ERR_CAST(priv_state);

	return to_dpu_global_state(priv_state);
}
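
/*
 * Typical usage sketch (assumed, based on how DRM private objects are
 * consumed): an atomic check path fetches the global state through the
 * drm_atomic_state it was handed, e.g.
 *
 *	global_state = dpu_kms_get_global_state(crtc_state->state);
 *	if (IS_ERR(global_state))
 *		return PTR_ERR(global_state);
 *
 * and then hands it to the resource manager (dpu_rm_*) so that any HW
 * block reservations are committed or discarded together with the rest
 * of the atomic state.
 */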

static struct drm_private_state *
dpu_kms_global_duplicate_state(struct drm_private_obj *obj)
{
	struct dpu_global_state *state;

	state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;

	__drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);

	return &state->base;
}

static void dpu_kms_global_destroy_state(struct drm_private_obj *obj,
				      struct drm_private_state *state)
{
	struct dpu_global_state *dpu_state = to_dpu_global_state(state);

	kfree(dpu_state);
}

static void dpu_kms_global_print_state(struct drm_printer *p,
				       const struct drm_private_state *state)
{
	const struct dpu_global_state *global_state = to_dpu_global_state(state);

	dpu_rm_print_state(p, global_state);
}

static const struct drm_private_state_funcs dpu_kms_global_state_funcs = {
	.atomic_duplicate_state = dpu_kms_global_duplicate_state,
	.atomic_destroy_state = dpu_kms_global_destroy_state,
	.atomic_print_state = dpu_kms_global_print_state,
};

static int dpu_kms_global_obj_init(struct dpu_kms *dpu_kms)
{
	struct dpu_global_state *state;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return -ENOMEM;

	drm_atomic_private_obj_init(dpu_kms->dev, &dpu_kms->global_state,
				    &state->base,
				    &dpu_kms_global_state_funcs);

	state->rm = &dpu_kms->rm;

	return 0;
}

static void dpu_kms_global_obj_fini(struct dpu_kms *dpu_kms)
{
	drm_atomic_private_obj_fini(&dpu_kms->global_state);
}

static int dpu_kms_parse_data_bus_icc_path(struct dpu_kms *dpu_kms)
{
	struct icc_path *path0;
	struct icc_path *path1;
	struct device *dpu_dev = &dpu_kms->pdev->dev;

	path0 = msm_icc_get(dpu_dev, "mdp0-mem");
	path1 = msm_icc_get(dpu_dev, "mdp1-mem");

	if (IS_ERR_OR_NULL(path0))
		return PTR_ERR_OR_ZERO(path0);

	dpu_kms->path[0] = path0;
	dpu_kms->num_paths = 1;

	if (!IS_ERR_OR_NULL(path1)) {
		dpu_kms->path[1] = path1;
		dpu_kms->num_paths++;
	}
	return 0;
}

static int dpu_kms_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
{
	return dpu_crtc_vblank(crtc, true);
}

static void dpu_kms_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
{
	dpu_crtc_vblank(crtc, false);
}

static void dpu_kms_enable_commit(struct msm_kms *kms)
{
	struct dpu_kms *dpu_kms = to_dpu_kms(kms);

	pm_runtime_get_sync(&dpu_kms->pdev->dev);
}

static void dpu_kms_disable_commit(struct msm_kms *kms)
{
	struct dpu_kms *dpu_kms = to_dpu_kms(kms);

	pm_runtime_put_sync(&dpu_kms->pdev->dev);
}

static int dpu_kms_check_mode_changed(struct msm_kms *kms, struct drm_atomic_state *state)
{
	struct drm_crtc_state *new_crtc_state;
	struct drm_connector *connector;
	struct drm_connector_state *new_conn_state;
	int i;

	for_each_new_connector_in_state(state, connector, new_conn_state, i) {
		struct drm_encoder *encoder;

		if (!new_conn_state->crtc || !new_conn_state->best_encoder)
			continue;

		new_crtc_state = drm_atomic_get_new_crtc_state(state, new_conn_state->crtc);

		encoder = new_conn_state->best_encoder;

		dpu_encoder_virt_check_mode_changed(encoder, new_crtc_state, new_conn_state);
	}

	return 0;
}

static void dpu_kms_flush_commit(struct msm_kms *kms, unsigned crtc_mask)
{
	struct dpu_kms *dpu_kms = to_dpu_kms(kms);
	struct drm_crtc *crtc;

	for_each_crtc_mask(dpu_kms->dev, crtc, crtc_mask) {
		if (!crtc->state->active)
			continue;

		trace_dpu_kms_commit(DRMID(crtc));
		dpu_crtc_commit_kickoff(crtc);
	}
}

static void dpu_kms_complete_commit(struct msm_kms *kms, unsigned crtc_mask)
{
	struct dpu_kms *dpu_kms = to_dpu_kms(kms);
	struct drm_crtc *crtc;

	DPU_ATRACE_BEGIN("kms_complete_commit");

	for_each_crtc_mask(dpu_kms->dev, crtc, crtc_mask)
		dpu_crtc_complete_commit(crtc);

	DPU_ATRACE_END("kms_complete_commit");
}

static void dpu_kms_wait_for_commit_done(struct msm_kms *kms,
		struct drm_crtc *crtc)
{
	struct drm_encoder *encoder;
	struct drm_device *dev;
	int ret;

	if (!kms || !crtc || !crtc->state) {
		DPU_ERROR("invalid params\n");
		return;
	}

	dev = crtc->dev;

	if (!crtc->state->enable) {
		DPU_DEBUG("[crtc:%d] not enabled\n", crtc->base.id);
		return;
	}

	if (!drm_atomic_crtc_effectively_active(crtc->state)) {
		DPU_DEBUG("[crtc:%d] not active\n", crtc->base.id);
		return;
	}

	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
		if (encoder->crtc != crtc)
			continue;
		/*
		 * Wait for post-flush if necessary to delay before
		 * plane_cleanup. For example, wait for vsync in case of video
		 * mode panels. This may be a no-op for command mode panels.
		 */
		trace_dpu_kms_wait_for_commit_done(DRMID(crtc));
		ret = dpu_encoder_wait_for_commit_done(encoder);
		if (ret && ret != -EWOULDBLOCK) {
			DPU_ERROR("wait for commit done returned %d\n", ret);
			break;
		}
	}
}

static void dpu_kms_wait_flush(struct msm_kms *kms, unsigned crtc_mask)
{
	struct dpu_kms *dpu_kms = to_dpu_kms(kms);
	struct drm_crtc *crtc;

	for_each_crtc_mask(dpu_kms->dev, crtc, crtc_mask)
		dpu_kms_wait_for_commit_done(kms, crtc);
}

static const char *dpu_vsync_sources[] = {
	[DPU_VSYNC_SOURCE_GPIO_0] = "mdp_vsync_p",
	[DPU_VSYNC_SOURCE_GPIO_1] = "mdp_vsync_s",
	[DPU_VSYNC_SOURCE_GPIO_2] = "mdp_vsync_e",
	[DPU_VSYNC_SOURCE_INTF_0] = "mdp_intf0",
	[DPU_VSYNC_SOURCE_INTF_1] = "mdp_intf1",
	[DPU_VSYNC_SOURCE_INTF_2] = "mdp_intf2",
	[DPU_VSYNC_SOURCE_INTF_3] = "mdp_intf3",
	[DPU_VSYNC_SOURCE_WD_TIMER_0] = "timer0",
	[DPU_VSYNC_SOURCE_WD_TIMER_1] = "timer1",
	[DPU_VSYNC_SOURCE_WD_TIMER_2] = "timer2",
	[DPU_VSYNC_SOURCE_WD_TIMER_3] = "timer3",
	[DPU_VSYNC_SOURCE_WD_TIMER_4] = "timer4",
};

static int dpu_kms_dsi_set_te_source(struct msm_display_info *info,
				     struct msm_dsi *dsi)
{
	const char *te_source = msm_dsi_get_te_source(dsi);
	int i;

	if (!te_source) {
		info->vsync_source = DPU_VSYNC_SOURCE_GPIO_0;
		return 0;
	}

	/* we can not use match_string since dpu_vsync_sources is a sparse array */
	for (i = 0; i < ARRAY_SIZE(dpu_vsync_sources); i++) {
		if (dpu_vsync_sources[i] &&
		    !strcmp(dpu_vsync_sources[i], te_source)) {
			info->vsync_source = i;
			return 0;
		}
	}

	return -EINVAL;
}
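
/*
 * For reference, the TE source is chosen per DSI host via a device tree
 * property parsed by the DSI driver (hedged sketch; see
 * msm_dsi_get_te_source() for the authoritative lookup):
 *
 *	&mdss_dsi0 {
 *		qcom,te-source = "mdp_vsync_e";
 *	};
 *
 * When the property is absent, DPU_VSYNC_SOURCE_GPIO_0 ("mdp_vsync_p")
 * is used as the default, as implemented above.
 */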

static int _dpu_kms_initialize_dsi(struct drm_device *dev,
				   struct msm_drm_private *priv,
				   struct dpu_kms *dpu_kms)
{
	struct drm_encoder *encoder = NULL;
	struct msm_display_info info;
	int i, rc = 0;

	if (!(priv->dsi[0] || priv->dsi[1]))
		return rc;

	/*
	 * We support the following configurations:
	 * - Single DSI host (dsi0 or dsi1)
	 * - Two independent DSI hosts
	 * - Bonded DSI0 and DSI1 hosts
	 *
	 * TODO: Support swapping DSI0 and DSI1 in the bonded setup.
	 */
	for (i = 0; i < ARRAY_SIZE(priv->dsi); i++) {
		int other = (i + 1) % 2;

		if (!priv->dsi[i])
			continue;

		if (msm_dsi_is_bonded_dsi(priv->dsi[i]) &&
		    !msm_dsi_is_master_dsi(priv->dsi[i]))
			continue;

		memset(&info, 0, sizeof(info));
		info.intf_type = INTF_DSI;

		info.h_tile_instance[info.num_of_h_tiles++] = i;
		if (msm_dsi_is_bonded_dsi(priv->dsi[i]))
			info.h_tile_instance[info.num_of_h_tiles++] = other;

		info.is_cmd_mode = msm_dsi_is_cmd_mode(priv->dsi[i]);

		rc = dpu_kms_dsi_set_te_source(&info, priv->dsi[i]);
		if (rc) {
			DPU_ERROR("failed to identify TE source for dsi display\n");
			return rc;
		}

		encoder = dpu_encoder_init(dev, DRM_MODE_ENCODER_DSI, &info);
		if (IS_ERR(encoder)) {
			DPU_ERROR("encoder init failed for dsi display\n");
			return PTR_ERR(encoder);
		}

		rc = msm_dsi_modeset_init(priv->dsi[i], dev, encoder);
		if (rc) {
			DPU_ERROR("modeset_init failed for dsi[%d], rc = %d\n",
				i, rc);
			break;
		}

		if (msm_dsi_is_bonded_dsi(priv->dsi[i]) && priv->dsi[other]) {
			rc = msm_dsi_modeset_init(priv->dsi[other], dev, encoder);
			if (rc) {
				DPU_ERROR("modeset_init failed for dsi[%d], rc = %d\n",
					other, rc);
				break;
			}
		}
	}

	return rc;
}

static int _dpu_kms_initialize_displayport(struct drm_device *dev,
					   struct msm_drm_private *priv,
					   struct dpu_kms *dpu_kms)
{
	struct drm_encoder *encoder = NULL;
	struct msm_display_info info;
	bool yuv_supported;
	int rc;
	int i;

	for (i = 0; i < ARRAY_SIZE(priv->dp); i++) {
		if (!priv->dp[i])
			continue;

		memset(&info, 0, sizeof(info));
		info.num_of_h_tiles = 1;
		info.h_tile_instance[0] = i;
		info.intf_type = INTF_DP;

		encoder = dpu_encoder_init(dev, DRM_MODE_ENCODER_TMDS, &info);
		if (IS_ERR(encoder)) {
			DPU_ERROR("encoder init failed for DP display\n");
			return PTR_ERR(encoder);
		}

		yuv_supported = !!dpu_kms->catalog->cdm;
		rc = msm_dp_modeset_init(priv->dp[i], dev, encoder, yuv_supported);
		if (rc) {
			DPU_ERROR("modeset_init failed for DP, rc = %d\n", rc);
			return rc;
		}
	}

	return 0;
}

static int _dpu_kms_initialize_hdmi(struct drm_device *dev,
				    struct msm_drm_private *priv,
				    struct dpu_kms *dpu_kms)
{
	struct drm_encoder *encoder = NULL;
	struct msm_display_info info;
	int rc;

	if (!priv->hdmi)
		return 0;

	memset(&info, 0, sizeof(info));
	info.num_of_h_tiles = 1;
	info.h_tile_instance[0] = 0;
	info.intf_type = INTF_HDMI;

	encoder = dpu_encoder_init(dev, DRM_MODE_ENCODER_TMDS, &info);
	if (IS_ERR(encoder)) {
		DPU_ERROR("encoder init failed for HDMI display\n");
		return PTR_ERR(encoder);
	}

	rc = msm_hdmi_modeset_init(priv->hdmi, dev, encoder);
	if (rc) {
		DPU_ERROR("modeset_init failed for HDMI, rc = %d\n", rc);
		return rc;
	}

	return 0;
}

static int _dpu_kms_initialize_writeback(struct drm_device *dev,
		struct msm_drm_private *priv, struct dpu_kms *dpu_kms,
		const u32 *wb_formats, int n_formats)
{
	struct drm_encoder *encoder = NULL;
	struct msm_display_info info;
	const enum dpu_wb wb_idx = WB_2;
	u32 maxlinewidth;
	int rc;

	memset(&info, 0, sizeof(info));

	info.num_of_h_tiles = 1;
	/* use only WB idx 2 instance for DPU */
	info.h_tile_instance[0] = wb_idx;
	info.intf_type = INTF_WB;

	maxlinewidth = dpu_rm_get_wb(&dpu_kms->rm, info.h_tile_instance[0])->caps->maxlinewidth;

	encoder = dpu_encoder_init(dev, DRM_MODE_ENCODER_VIRTUAL, &info);
	if (IS_ERR(encoder)) {
		DPU_ERROR("encoder init failed for writeback display\n");
		return PTR_ERR(encoder);
	}

	rc = dpu_writeback_init(dev, encoder, wb_formats, n_formats, maxlinewidth);
	if (rc) {
		DPU_ERROR("dpu_writeback_init failed, rc = %d\n", rc);
		return rc;
	}

	return 0;
}

/**
 * _dpu_kms_setup_displays - create encoders, bridges and connectors
 *                           for underlying displays
 * @dev: Pointer to drm device structure
 * @priv: Pointer to private drm device data
 * @dpu_kms: Pointer to dpu kms structure
 * Returns: Zero on success
 */
static int _dpu_kms_setup_displays(struct drm_device *dev,
				   struct msm_drm_private *priv,
				   struct dpu_kms *dpu_kms)
{
	int rc = 0;
	int i;

	rc = _dpu_kms_initialize_dsi(dev, priv, dpu_kms);
	if (rc) {
		DPU_ERROR("initialize_dsi failed, rc = %d\n", rc);
		return rc;
	}

	rc = _dpu_kms_initialize_displayport(dev, priv, dpu_kms);
	if (rc) {
		DPU_ERROR("initialize_DP failed, rc = %d\n", rc);
		return rc;
	}

	rc = _dpu_kms_initialize_hdmi(dev, priv, dpu_kms);
	if (rc) {
		DPU_ERROR("initialize HDMI failed, rc = %d\n", rc);
		return rc;
	}

	/* Since WB isn't a separate driver, check the catalog before initializing */
	if (dpu_kms->catalog->wb_count) {
		for (i = 0; i < dpu_kms->catalog->wb_count; i++) {
			if (dpu_kms->catalog->wb[i].id == WB_2) {
				rc = _dpu_kms_initialize_writeback(dev, priv, dpu_kms,
						dpu_kms->catalog->wb[i].format_list,
						dpu_kms->catalog->wb[i].num_formats);
				if (rc) {
					DPU_ERROR("initialize_WB failed, rc = %d\n", rc);
					return rc;
				}
			}
		}
	}

	return rc;
}

#define MAX_PLANES 20
static int _dpu_kms_drm_obj_init(struct dpu_kms *dpu_kms)
{
	struct drm_device *dev;
	struct drm_plane *primary_planes[MAX_PLANES], *plane;
	struct drm_plane *cursor_planes[MAX_PLANES] = { NULL };
	struct drm_crtc *crtc;
	struct drm_encoder *encoder;
	unsigned int num_encoders;

	struct msm_drm_private *priv;
	const struct dpu_mdss_cfg *catalog;

	int primary_planes_idx = 0, cursor_planes_idx = 0, i, ret;
	int max_crtc_count;

	dev = dpu_kms->dev;
	priv = dev->dev_private;
	catalog = dpu_kms->catalog;

	/*
	 * Create encoder and query display drivers to create
	 * bridges and connectors
	 */
	ret = _dpu_kms_setup_displays(dev, priv, dpu_kms);
	if (ret)
		return ret;

	num_encoders = 0;
	drm_for_each_encoder(encoder, dev)
		num_encoders++;

	max_crtc_count = min(catalog->mixer_count, num_encoders);

	/* Create the planes, keeping track of one primary/cursor per crtc */
	for (i = 0; i < catalog->sspp_count; i++) {
		enum drm_plane_type type;

		if ((catalog->sspp[i].features & BIT(DPU_SSPP_CURSOR))
			&& cursor_planes_idx < max_crtc_count)
			type = DRM_PLANE_TYPE_CURSOR;
		else if (primary_planes_idx < max_crtc_count)
			type = DRM_PLANE_TYPE_PRIMARY;
		else
			type = DRM_PLANE_TYPE_OVERLAY;

		DPU_DEBUG("Create plane type %d with features %lx (cur %lx)\n",
			  type, catalog->sspp[i].features,
			  catalog->sspp[i].features & BIT(DPU_SSPP_CURSOR));

		if (dpu_use_virtual_planes)
			plane = dpu_plane_init_virtual(dev, type, (1UL << max_crtc_count) - 1);
		else
			plane = dpu_plane_init(dev, catalog->sspp[i].id, type,
					       (1UL << max_crtc_count) - 1);
		if (IS_ERR(plane)) {
			DPU_ERROR("dpu_plane_init failed\n");
			ret = PTR_ERR(plane);
			return ret;
		}

		if (type == DRM_PLANE_TYPE_CURSOR)
			cursor_planes[cursor_planes_idx++] = plane;
		else if (type == DRM_PLANE_TYPE_PRIMARY)
			primary_planes[primary_planes_idx++] = plane;
	}

	max_crtc_count = min(max_crtc_count, primary_planes_idx);

	/* Create one CRTC per encoder */
	for (i = 0; i < max_crtc_count; i++) {
		crtc = dpu_crtc_init(dev, primary_planes[i], cursor_planes[i]);
		if (IS_ERR(crtc)) {
			ret = PTR_ERR(crtc);
			return ret;
		}
		priv->num_crtcs++;
	}

	/* All CRTCs are compatible with all encoders */
	drm_for_each_encoder(encoder, dev)
		encoder->possible_crtcs = (1 << priv->num_crtcs) - 1;

	return 0;
}

static void _dpu_kms_hw_destroy(struct dpu_kms *dpu_kms)
{
	int i;

	dpu_kms->hw_intr = NULL;

	/* safe to call these more than once during shutdown */
	_dpu_kms_mmu_destroy(dpu_kms);

	for (i = 0; i < ARRAY_SIZE(dpu_kms->hw_vbif); i++) {
		dpu_kms->hw_vbif[i] = NULL;
	}

	dpu_kms_global_obj_fini(dpu_kms);

	dpu_kms->catalog = NULL;

	dpu_kms->hw_mdp = NULL;
}

static void dpu_kms_destroy(struct msm_kms *kms)
{
	struct dpu_kms *dpu_kms;

	if (!kms) {
		DPU_ERROR("invalid kms\n");
		return;
	}

	dpu_kms = to_dpu_kms(kms);

	_dpu_kms_hw_destroy(dpu_kms);

	msm_kms_destroy(&dpu_kms->base);

	if (dpu_kms->rpm_enabled)
		pm_runtime_disable(&dpu_kms->pdev->dev);
}

static int dpu_irq_postinstall(struct msm_kms *kms)
{
	struct msm_drm_private *priv;
	struct dpu_kms *dpu_kms = to_dpu_kms(kms);

	if (!dpu_kms || !dpu_kms->dev)
		return -EINVAL;

	priv = dpu_kms->dev->dev_private;
	if (!priv)
		return -EINVAL;

	return 0;
}

static void dpu_kms_mdp_snapshot(struct msm_disp_state *disp_state, struct msm_kms *kms)
{
	int i;
	struct dpu_kms *dpu_kms;
	const struct dpu_mdss_cfg *cat;
	void __iomem *base;

	dpu_kms = to_dpu_kms(kms);

	cat = dpu_kms->catalog;

	pm_runtime_get_sync(&dpu_kms->pdev->dev);

	/* dump CTL sub-blocks HW regs info */
	for (i = 0; i < cat->ctl_count; i++)
		msm_disp_snapshot_add_block(disp_state, cat->ctl[i].len,
					    dpu_kms->mmio + cat->ctl[i].base, "%s",
					    cat->ctl[i].name);

	/* dump DSPP sub-blocks HW regs info */
	for (i = 0; i < cat->dspp_count; i++) {
		base = dpu_kms->mmio + cat->dspp[i].base;
		msm_disp_snapshot_add_block(disp_state, cat->dspp[i].len, base,
					    "%s", cat->dspp[i].name);

		if (cat->dspp[i].sblk && cat->dspp[i].sblk->pcc.len > 0)
			msm_disp_snapshot_add_block(disp_state, cat->dspp[i].sblk->pcc.len,
						    base + cat->dspp[i].sblk->pcc.base, "%s_%s",
						    cat->dspp[i].name,
						    cat->dspp[i].sblk->pcc.name);
	}

	/* dump INTF sub-blocks HW regs info */
	for (i = 0; i < cat->intf_count; i++)
		msm_disp_snapshot_add_block(disp_state, cat->intf[i].len,
					    dpu_kms->mmio + cat->intf[i].base, "%s",
					    cat->intf[i].name);

	/* dump PP sub-blocks HW regs info */
	for (i = 0; i < cat->pingpong_count; i++) {
		base = dpu_kms->mmio + cat->pingpong[i].base;
		msm_disp_snapshot_add_block(disp_state, cat->pingpong[i].len, base,
					    "%s", cat->pingpong[i].name);

		/* TE2 sub-block has length of 0, so will not print it */

		if (cat->pingpong[i].sblk && cat->pingpong[i].sblk->dither.len > 0)
			msm_disp_snapshot_add_block(disp_state, cat->pingpong[i].sblk->dither.len,
						    base + cat->pingpong[i].sblk->dither.base,
						    "%s_%s", cat->pingpong[i].name,
						    cat->pingpong[i].sblk->dither.name);
	}

	/* dump SSPP sub-blocks HW regs info */
	for (i = 0; i < cat->sspp_count; i++) {
		base = dpu_kms->mmio + cat->sspp[i].base;
		msm_disp_snapshot_add_block(disp_state, cat->sspp[i].len, base,
					    "%s", cat->sspp[i].name);

		if (cat->sspp[i].sblk && cat->sspp[i].sblk->scaler_blk.len > 0)
			msm_disp_snapshot_add_block(disp_state, cat->sspp[i].sblk->scaler_blk.len,
						    base + cat->sspp[i].sblk->scaler_blk.base,
						    "%s_%s", cat->sspp[i].name,
						    cat->sspp[i].sblk->scaler_blk.name);

		if (cat->sspp[i].sblk && cat->sspp[i].sblk->csc_blk.len > 0)
			msm_disp_snapshot_add_block(disp_state, cat->sspp[i].sblk->csc_blk.len,
						    base + cat->sspp[i].sblk->csc_blk.base,
						    "%s_%s", cat->sspp[i].name,
						    cat->sspp[i].sblk->csc_blk.name);
	}

	/* dump LM sub-blocks HW regs info */
	for (i = 0; i < cat->mixer_count; i++)
		msm_disp_snapshot_add_block(disp_state, cat->mixer[i].len,
					    dpu_kms->mmio + cat->mixer[i].base,
					    "%s", cat->mixer[i].name);

	/* dump WB sub-blocks HW regs info */
	for (i = 0; i < cat->wb_count; i++)
		msm_disp_snapshot_add_block(disp_state, cat->wb[i].len,
					    dpu_kms->mmio + cat->wb[i].base, "%s",
					    cat->wb[i].name);

	if (cat->mdp[0].features & BIT(DPU_MDP_PERIPH_0_REMOVED)) {
		msm_disp_snapshot_add_block(disp_state, MDP_PERIPH_TOP0,
					    dpu_kms->mmio + cat->mdp[0].base, "top");
		msm_disp_snapshot_add_block(disp_state, cat->mdp[0].len - MDP_PERIPH_TOP0_END,
					    dpu_kms->mmio + cat->mdp[0].base + MDP_PERIPH_TOP0_END,
					    "top_2");
	} else {
		msm_disp_snapshot_add_block(disp_state, cat->mdp[0].len,
					    dpu_kms->mmio + cat->mdp[0].base, "top");
	}

	/* dump CWB sub-blocks HW regs info */
	for (i = 0; i < cat->cwb_count; i++)
		msm_disp_snapshot_add_block(disp_state, cat->cwb[i].len,
					    dpu_kms->mmio + cat->cwb[i].base,
					    "%s", cat->cwb[i].name);

	/* dump DSC sub-blocks HW regs info */
	for (i = 0; i < cat->dsc_count; i++) {
		base = dpu_kms->mmio + cat->dsc[i].base;
		msm_disp_snapshot_add_block(disp_state, cat->dsc[i].len, base,
					    "%s", cat->dsc[i].name);

		if (cat->dsc[i].features & BIT(DPU_DSC_HW_REV_1_2)) {
			struct dpu_dsc_blk enc = cat->dsc[i].sblk->enc;
			struct dpu_dsc_blk ctl = cat->dsc[i].sblk->ctl;

			msm_disp_snapshot_add_block(disp_state, enc.len, base + enc.base, "%s_%s",
						    cat->dsc[i].name, enc.name);
			msm_disp_snapshot_add_block(disp_state, ctl.len, base + ctl.base, "%s_%s",
						    cat->dsc[i].name, ctl.name);
		}
	}

	if (cat->cdm)
		msm_disp_snapshot_add_block(disp_state, cat->cdm->len,
					    dpu_kms->mmio + cat->cdm->base,
					    "%s", cat->cdm->name);

	for (i = 0; i < dpu_kms->catalog->vbif_count; i++) {
		const struct dpu_vbif_cfg *vbif = &dpu_kms->catalog->vbif[i];

		msm_disp_snapshot_add_block(disp_state, vbif->len,
					    dpu_kms->vbif[vbif->id] + vbif->base,
					    "%s", vbif->name);
	}

	pm_runtime_put_sync(&dpu_kms->pdev->dev);
}

static const struct msm_kms_funcs kms_funcs = {
	.hw_init = dpu_kms_hw_init,
	.irq_preinstall = dpu_core_irq_preinstall,
	.irq_postinstall = dpu_irq_postinstall,
	.irq_uninstall = dpu_core_irq_uninstall,
	.irq = dpu_core_irq,
	.enable_commit = dpu_kms_enable_commit,
	.disable_commit = dpu_kms_disable_commit,
	.check_mode_changed = dpu_kms_check_mode_changed,
	.flush_commit = dpu_kms_flush_commit,
	.wait_flush = dpu_kms_wait_flush,
	.complete_commit = dpu_kms_complete_commit,
	.enable_vblank = dpu_kms_enable_vblank,
	.disable_vblank = dpu_kms_disable_vblank,
	.destroy = dpu_kms_destroy,
	.snapshot = dpu_kms_mdp_snapshot,
#ifdef CONFIG_DEBUG_FS
	.debugfs_init = dpu_kms_debugfs_init,
#endif
};

static void _dpu_kms_mmu_destroy(struct dpu_kms *dpu_kms)
{
	struct msm_mmu *mmu;

	if (!dpu_kms->base.aspace)
		return;

	mmu = dpu_kms->base.aspace->mmu;

	mmu->funcs->detach(mmu);
	msm_gem_address_space_put(dpu_kms->base.aspace);

	dpu_kms->base.aspace = NULL;
}

static int _dpu_kms_mmu_init(struct dpu_kms *dpu_kms)
{
	struct msm_gem_address_space *aspace;

	aspace = msm_kms_init_aspace(dpu_kms->dev);
	if (IS_ERR(aspace))
		return PTR_ERR(aspace);

	dpu_kms->base.aspace = aspace;

	return 0;
}

/**
 * dpu_kms_get_clk_rate() - get the clock rate
 * @dpu_kms: pointer to dpu_kms structure
 * @clock_name: clock name to get the rate
 *
 * Return: current clock rate
 */
unsigned long dpu_kms_get_clk_rate(struct dpu_kms *dpu_kms, char *clock_name)
{
	struct clk *clk;

	clk = msm_clk_bulk_get_clock(dpu_kms->clocks, dpu_kms->num_clocks, clock_name);
	if (!clk)
		return 0;

	return clk_get_rate(clk);
}
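
/*
 * Minimal usage sketch: the "core" clock rate bounds the MDP core clock,
 * as done in dpu_kms_hw_init() below:
 *
 *	unsigned long rate = dpu_kms_get_clk_rate(dpu_kms, "core");
 *
 * A return value of 0 means no clock of that name is present in
 * dpu_kms->clocks.
 */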

#define DPU_PERF_DEFAULT_MAX_CORE_CLK_RATE	412500000

static int dpu_kms_hw_init(struct msm_kms *kms)
{
	struct dpu_kms *dpu_kms;
	struct drm_device *dev;
	int i, rc = -EINVAL;
	unsigned long max_core_clk_rate;
	u32 core_rev;

	if (!kms) {
		DPU_ERROR("invalid kms\n");
		return rc;
	}

	dpu_kms = to_dpu_kms(kms);
	dev = dpu_kms->dev;

	dev->mode_config.cursor_width = 512;
	dev->mode_config.cursor_height = 512;

	rc = dpu_kms_global_obj_init(dpu_kms);
	if (rc)
		return rc;

	atomic_set(&dpu_kms->bandwidth_ref, 0);

	rc = pm_runtime_resume_and_get(&dpu_kms->pdev->dev);
	if (rc < 0)
		goto error;

	core_rev = readl_relaxed(dpu_kms->mmio + 0x0);

	pr_info("dpu hardware revision:0x%x\n", core_rev);

	dpu_kms->catalog = of_device_get_match_data(dev->dev);
	if (!dpu_kms->catalog) {
		DPU_ERROR("device config not known!\n");
		rc = -EINVAL;
		goto err_pm_put;
	}

	/*
	 * Now we need to read the HW catalog and initialize resources such as
	 * clocks, regulators, GDSC/MMAGIC, ioremap the register ranges etc
	 */
	rc = _dpu_kms_mmu_init(dpu_kms);
	if (rc) {
		DPU_ERROR("dpu_kms_mmu_init failed: %d\n", rc);
		goto err_pm_put;
	}

	dpu_kms->mdss = msm_mdss_get_mdss_data(dpu_kms->pdev->dev.parent);
	if (IS_ERR(dpu_kms->mdss)) {
		rc = PTR_ERR(dpu_kms->mdss);
		DPU_ERROR("failed to get MDSS data: %d\n", rc);
		goto err_pm_put;
	}

	if (!dpu_kms->mdss) {
		rc = -EINVAL;
		DPU_ERROR("NULL MDSS data\n");
		goto err_pm_put;
	}

	rc = dpu_rm_init(dev, &dpu_kms->rm, dpu_kms->catalog, dpu_kms->mdss, dpu_kms->mmio);
	if (rc) {
		DPU_ERROR("rm init failed: %d\n", rc);
		goto err_pm_put;
	}

	dpu_kms->hw_mdp = dpu_hw_mdptop_init(dev,
					     dpu_kms->catalog->mdp,
					     dpu_kms->mmio,
					     dpu_kms->catalog->mdss_ver);
	if (IS_ERR(dpu_kms->hw_mdp)) {
		rc = PTR_ERR(dpu_kms->hw_mdp);
		DPU_ERROR("failed to get hw_mdp: %d\n", rc);
		dpu_kms->hw_mdp = NULL;
		goto err_pm_put;
	}

	for (i = 0; i < dpu_kms->catalog->vbif_count; i++) {
		struct dpu_hw_vbif *hw;
		const struct dpu_vbif_cfg *vbif = &dpu_kms->catalog->vbif[i];

		hw = dpu_hw_vbif_init(dev, vbif, dpu_kms->vbif[vbif->id]);
		if (IS_ERR(hw)) {
			rc = PTR_ERR(hw);
			DPU_ERROR("failed to init vbif %d: %d\n", vbif->id, rc);
			goto err_pm_put;
		}

		dpu_kms->hw_vbif[vbif->id] = hw;
	}

	/* TODO: use the same max_freq as in dpu_kms_init() */
	max_core_clk_rate = dpu_kms_get_clk_rate(dpu_kms, "core");
	if (!max_core_clk_rate) {
		DPU_DEBUG("max core clk rate not determined, using default\n");
		max_core_clk_rate = DPU_PERF_DEFAULT_MAX_CORE_CLK_RATE;
	}

	rc = dpu_core_perf_init(&dpu_kms->perf, dpu_kms->catalog->perf, max_core_clk_rate);
	if (rc) {
		DPU_ERROR("failed to init perf %d\n", rc);
		goto err_pm_put;
	}

	/*
	 * We need to program DP <-> PHY relationship only for SC8180X since it
	 * has fewer DP controllers than DP PHYs.
	 * If any other platform requires the same kind of programming, or if
	 * the INTF <-> DP relationship isn't static anymore, this needs to be
	 * configured through the DT.
	 */
	if (of_device_is_compatible(dpu_kms->pdev->dev.of_node, "qcom,sc8180x-dpu"))
		dpu_kms->hw_mdp->ops.dp_phy_intf_sel(dpu_kms->hw_mdp, (unsigned int[]){ 1, 2, });

	dpu_kms->hw_intr = dpu_hw_intr_init(dev, dpu_kms->mmio, dpu_kms->catalog);
	if (IS_ERR(dpu_kms->hw_intr)) {
		rc = PTR_ERR(dpu_kms->hw_intr);
		DPU_ERROR("hw_intr init failed: %d\n", rc);
		dpu_kms->hw_intr = NULL;
		goto err_pm_put;
	}

	dev->mode_config.min_width = 0;
	dev->mode_config.min_height = 0;

	dev->mode_config.max_width = DPU_MAX_IMG_WIDTH;
	dev->mode_config.max_height = DPU_MAX_IMG_HEIGHT;

	dev->max_vblank_count = 0xffffffff;
	/* Disable vblank irqs aggressively for power-saving */
	dev->vblank_disable_immediate = true;

	/*
	 * _dpu_kms_drm_obj_init should create the DRM related objects
	 * i.e. CRTCs, planes, encoders, connectors and so forth
	 */
	rc = _dpu_kms_drm_obj_init(dpu_kms);
	if (rc) {
		DPU_ERROR("modeset init failed: %d\n", rc);
		goto err_pm_put;
	}

	dpu_vbif_init_memtypes(dpu_kms);

	pm_runtime_put_sync(&dpu_kms->pdev->dev);

	return 0;

err_pm_put:
	pm_runtime_put_sync(&dpu_kms->pdev->dev);
error:
	_dpu_kms_hw_destroy(dpu_kms);

	return rc;
}

static int dpu_kms_init(struct drm_device *ddev)
{
	struct msm_drm_private *priv = ddev->dev_private;
	struct device *dev = ddev->dev;
	struct platform_device *pdev = to_platform_device(dev);
	struct dpu_kms *dpu_kms = to_dpu_kms(priv->kms);
	struct dev_pm_opp *opp;
	int ret = 0;
	unsigned long max_freq = ULONG_MAX;

	opp = dev_pm_opp_find_freq_floor(dev, &max_freq);
	if (!IS_ERR(opp))
		dev_pm_opp_put(opp);

	dev_pm_opp_set_rate(dev, max_freq);

	ret = msm_kms_init(&dpu_kms->base, &kms_funcs);
	if (ret) {
		DPU_ERROR("failed to init kms, ret=%d\n", ret);
		return ret;
	}
	dpu_kms->dev = ddev;

	pm_runtime_enable(&pdev->dev);
	dpu_kms->rpm_enabled = true;

	return 0;
}

static int dpu_kms_mmap_mdp5(struct dpu_kms *dpu_kms)
{
	struct platform_device *pdev = dpu_kms->pdev;
	struct platform_device *mdss_dev;
	int ret;

	if (!dev_is_platform(dpu_kms->pdev->dev.parent))
		return -EINVAL;

	mdss_dev = to_platform_device(dpu_kms->pdev->dev.parent);

	dpu_kms->mmio = msm_ioremap(pdev, "mdp_phys");
	if (IS_ERR(dpu_kms->mmio)) {
		ret = PTR_ERR(dpu_kms->mmio);
		DPU_ERROR("mdp register memory map failed: %d\n", ret);
		dpu_kms->mmio = NULL;
		return ret;
	}
	DRM_DEBUG("mapped dpu address space @%pK\n", dpu_kms->mmio);

	dpu_kms->vbif[VBIF_RT] = msm_ioremap_mdss(mdss_dev,
						  dpu_kms->pdev,
						  "vbif_phys");
	if (IS_ERR(dpu_kms->vbif[VBIF_RT])) {
		ret = PTR_ERR(dpu_kms->vbif[VBIF_RT]);
		DPU_ERROR("vbif register memory map failed: %d\n", ret);
		dpu_kms->vbif[VBIF_RT] = NULL;
		return ret;
	}

	dpu_kms->vbif[VBIF_NRT] = msm_ioremap_mdss(mdss_dev,
						   dpu_kms->pdev,
						   "vbif_nrt_phys");
	if (IS_ERR(dpu_kms->vbif[VBIF_NRT])) {
		dpu_kms->vbif[VBIF_NRT] = NULL;
		DPU_DEBUG("VBIF NRT is not defined\n");
	}

	return 0;
}

static int dpu_kms_mmap_dpu(struct dpu_kms *dpu_kms)
{
	struct platform_device *pdev = dpu_kms->pdev;
	int ret;

	dpu_kms->mmio = msm_ioremap(pdev, "mdp");
	if (IS_ERR(dpu_kms->mmio)) {
		ret = PTR_ERR(dpu_kms->mmio);
		DPU_ERROR("mdp register memory map failed: %d\n", ret);
		dpu_kms->mmio = NULL;
		return ret;
	}
	DRM_DEBUG("mapped dpu address space @%pK\n", dpu_kms->mmio);

	dpu_kms->vbif[VBIF_RT] = msm_ioremap(pdev, "vbif");
	if (IS_ERR(dpu_kms->vbif[VBIF_RT])) {
		ret = PTR_ERR(dpu_kms->vbif[VBIF_RT]);
		DPU_ERROR("vbif register memory map failed: %d\n", ret);
		dpu_kms->vbif[VBIF_RT] = NULL;
		return ret;
	}

	dpu_kms->vbif[VBIF_NRT] = msm_ioremap_quiet(pdev, "vbif_nrt");
	if (IS_ERR(dpu_kms->vbif[VBIF_NRT])) {
		dpu_kms->vbif[VBIF_NRT] = NULL;
		DPU_DEBUG("VBIF NRT is not defined\n");
	}

	return 0;
}

static int dpu_dev_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct dpu_kms *dpu_kms;
	int irq;
	int ret = 0;

	if (!msm_disp_drv_should_bind(&pdev->dev, true))
		return -ENODEV;

	dpu_kms = devm_kzalloc(dev, sizeof(*dpu_kms), GFP_KERNEL);
	if (!dpu_kms)
		return -ENOMEM;

	dpu_kms->pdev = pdev;

	ret = devm_pm_opp_set_clkname(dev, "core");
	if (ret)
		return ret;

	/* OPP table is optional */
	ret = devm_pm_opp_of_add_table(dev);
	if (ret && ret != -ENODEV)
		return dev_err_probe(dev, ret, "invalid OPP table in device tree\n");

	ret = devm_clk_bulk_get_all(&pdev->dev, &dpu_kms->clocks);
	if (ret < 0)
		return dev_err_probe(dev, ret, "failed to parse clocks\n");

	dpu_kms->num_clocks = ret;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return dev_err_probe(dev, irq, "failed to get irq\n");

	dpu_kms->base.irq = irq;

	if (of_device_is_compatible(dpu_kms->pdev->dev.of_node, "qcom,mdp5"))
		ret = dpu_kms_mmap_mdp5(dpu_kms);
	else
		ret = dpu_kms_mmap_dpu(dpu_kms);
	if (ret)
		return ret;

	ret = dpu_kms_parse_data_bus_icc_path(dpu_kms);
	if (ret)
		return ret;

	return msm_drv_probe(&pdev->dev, dpu_kms_init, &dpu_kms->base);
}

static void dpu_dev_remove(struct platform_device *pdev)
{
	component_master_del(&pdev->dev, &msm_drm_ops);
}

static int __maybe_unused dpu_runtime_suspend(struct device *dev)
{
	int i;
	struct platform_device *pdev = to_platform_device(dev);
	struct msm_drm_private *priv = platform_get_drvdata(pdev);
	struct dpu_kms *dpu_kms = to_dpu_kms(priv->kms);

	/* Drop the performance state vote */
	dev_pm_opp_set_rate(dev, 0);
	clk_bulk_disable_unprepare(dpu_kms->num_clocks, dpu_kms->clocks);

	for (i = 0; i < dpu_kms->num_paths; i++)
		icc_set_bw(dpu_kms->path[i], 0, 0);

	return 0;
}

static int __maybe_unused dpu_runtime_resume(struct device *dev)
{
	int rc = -1;
	struct platform_device *pdev = to_platform_device(dev);
	struct msm_drm_private *priv = platform_get_drvdata(pdev);
	struct dpu_kms *dpu_kms = to_dpu_kms(priv->kms);
	struct drm_encoder *encoder;
	struct drm_device *ddev;

	ddev = dpu_kms->dev;

	rc = clk_bulk_prepare_enable(dpu_kms->num_clocks, dpu_kms->clocks);
	if (rc) {
		DPU_ERROR("clock enable failed rc:%d\n", rc);
		return rc;
	}

	dpu_vbif_init_memtypes(dpu_kms);

	drm_for_each_encoder(encoder, ddev)
		dpu_encoder_virt_runtime_resume(encoder);

	return rc;
}

static const struct dev_pm_ops dpu_pm_ops = {
	SET_RUNTIME_PM_OPS(dpu_runtime_suspend, dpu_runtime_resume, NULL)
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
	.prepare = msm_kms_pm_prepare,
	.complete = msm_kms_pm_complete,
};

static const struct of_device_id dpu_dt_match[] = {
	{ .compatible = "qcom,msm8917-mdp5", .data = &dpu_msm8917_cfg, },
	{ .compatible = "qcom,msm8937-mdp5", .data = &dpu_msm8937_cfg, },
	{ .compatible = "qcom,msm8953-mdp5", .data = &dpu_msm8953_cfg, },
	{ .compatible = "qcom,msm8996-mdp5", .data = &dpu_msm8996_cfg, },
	{ .compatible = "qcom,msm8998-dpu", .data = &dpu_msm8998_cfg, },
	{ .compatible = "qcom,qcm2290-dpu", .data = &dpu_qcm2290_cfg, },
	{ .compatible = "qcom,sa8775p-dpu", .data = &dpu_sa8775p_cfg, },
	{ .compatible = "qcom,sdm630-mdp5", .data = &dpu_sdm630_cfg, },
	{ .compatible = "qcom,sdm660-mdp5", .data = &dpu_sdm660_cfg, },
	{ .compatible = "qcom,sdm670-dpu", .data = &dpu_sdm670_cfg, },
	{ .compatible = "qcom,sdm845-dpu", .data = &dpu_sdm845_cfg, },
	{ .compatible = "qcom,sc7180-dpu", .data = &dpu_sc7180_cfg, },
	{ .compatible = "qcom,sc7280-dpu", .data = &dpu_sc7280_cfg, },
	{ .compatible = "qcom,sc8180x-dpu", .data = &dpu_sc8180x_cfg, },
	{ .compatible = "qcom,sc8280xp-dpu", .data = &dpu_sc8280xp_cfg, },
	{ .compatible = "qcom,sm6115-dpu", .data = &dpu_sm6115_cfg, },
	{ .compatible = "qcom,sm6125-dpu", .data = &dpu_sm6125_cfg, },
	{ .compatible = "qcom,sm6150-dpu", .data = &dpu_sm6150_cfg, },
	{ .compatible = "qcom,sm6350-dpu", .data = &dpu_sm6350_cfg, },
	{ .compatible = "qcom,sm6375-dpu", .data = &dpu_sm6375_cfg, },
	{ .compatible = "qcom,sm7150-dpu", .data = &dpu_sm7150_cfg, },
	{ .compatible = "qcom,sm8150-dpu", .data = &dpu_sm8150_cfg, },
	{ .compatible = "qcom,sm8250-dpu", .data = &dpu_sm8250_cfg, },
	{ .compatible = "qcom,sm8350-dpu", .data = &dpu_sm8350_cfg, },
	{ .compatible = "qcom,sm8450-dpu", .data = &dpu_sm8450_cfg, },
	{ .compatible = "qcom,sm8550-dpu", .data = &dpu_sm8550_cfg, },
	{ .compatible = "qcom,sm8650-dpu", .data = &dpu_sm8650_cfg, },
	{ .compatible = "qcom,x1e80100-dpu", .data = &dpu_x1e80100_cfg, },
	{}
};
MODULE_DEVICE_TABLE(of, dpu_dt_match);

static struct platform_driver dpu_driver = {
	.probe = dpu_dev_probe,
	.remove = dpu_dev_remove,
	.shutdown = msm_kms_shutdown,
	.driver = {
		.name = "msm_dpu",
		.of_match_table = dpu_dt_match,
		.pm = &dpu_pm_ops,
	},
};

void __init msm_dpu_register(void)
{
	platform_driver_register(&dpu_driver);
}

void __exit msm_dpu_unregister(void)
{
	platform_driver_unregister(&dpu_driver);
}