Lines Matching full:gmu
21 static void a6xx_gmu_fault(struct a6xx_gmu *gmu) in a6xx_gmu_fault() argument
23 struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu); in a6xx_gmu_fault()
28 gmu->hung = true; in a6xx_gmu_fault()
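The fault handler above recovers the enclosing a6xx_gpu from the embedded gmu pointer with container_of(). A minimal standalone C sketch of that idiom, using shortened stand-in struct names rather than the real driver types:

#include <stddef.h>
#include <stdio.h>

/* Userspace rendition of the kernel's container_of() macro. */
#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

struct gmu { int hung; };
struct gpu {
    int id;
    struct gmu gmu;     /* embedded, as a6xx_gmu is embedded in a6xx_gpu */
};

static void gmu_fault(struct gmu *gmu)
{
    /* Walk back from the embedded member to the containing structure. */
    struct gpu *gpu = container_of(gmu, struct gpu, gmu);

    gmu->hung = 1;
    printf("GPU %d: GMU marked hung\n", gpu->id);
}

int main(void)
{
    struct gpu gpu = { .id = 7 };

    gmu_fault(&gpu.gmu);
    return 0;
}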
39 struct a6xx_gmu *gmu = data; in a6xx_gmu_irq() local
42 status = gmu_read(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_STATUS); in a6xx_gmu_irq()
43 gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_CLR, status); in a6xx_gmu_irq()
46 dev_err_ratelimited(gmu->dev, "GMU watchdog expired\n"); in a6xx_gmu_irq()
48 a6xx_gmu_fault(gmu); in a6xx_gmu_irq()
52 dev_err_ratelimited(gmu->dev, "GMU AHB bus error\n"); in a6xx_gmu_irq()
55 dev_err_ratelimited(gmu->dev, "GMU fence error: 0x%x\n", in a6xx_gmu_irq()
56 gmu_read(gmu, REG_A6XX_GMU_AHB_FENCE_STATUS)); in a6xx_gmu_irq()
63 struct a6xx_gmu *gmu = data; in a6xx_hfi_irq() local
66 status = gmu_read(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO); in a6xx_hfi_irq()
67 gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR, status); in a6xx_hfi_irq()
70 dev_err_ratelimited(gmu->dev, "GMU firmware fault\n"); in a6xx_hfi_irq()
72 a6xx_gmu_fault(gmu); in a6xx_hfi_irq()
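Both interrupt handlers above follow the same read-status / write-clear / dispatch-per-bit shape. A small self-contained sketch of that pattern; the register variables and bit positions here are invented for illustration and are not the real A6XX register definitions:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins for the AO_HOST_INTERRUPT status/clear registers. */
static uint32_t fake_status_reg;
static uint32_t fake_clear_reg;

#define WDOG_BITE   (1u << 0)   /* assumed bit positions, for illustration only */
#define AHB_ERROR   (1u << 1)
#define FENCE_ERROR (1u << 2)

static void handle_gmu_irq(void)
{
    /* Read the pending bits, then write them back to the clear register. */
    uint32_t status = fake_status_reg;

    fake_clear_reg = status;
    fake_status_reg = 0;

    /* Dispatch on each bit, mirroring the watchdog/AHB/fence cases above. */
    if (status & WDOG_BITE)
        printf("watchdog expired -> mark GMU as hung\n");
    if (status & AHB_ERROR)
        printf("AHB bus error\n");
    if (status & FENCE_ERROR)
        printf("fence error\n");
}

int main(void)
{
    fake_status_reg = WDOG_BITE | FENCE_ERROR;
    handle_gmu_irq();
    return 0;
}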
78 bool a6xx_gmu_sptprac_is_on(struct a6xx_gmu *gmu) in a6xx_gmu_sptprac_is_on() argument
82 /* This can be called from gpu state code so make sure GMU is valid */ in a6xx_gmu_sptprac_is_on()
83 if (!gmu->initialized) in a6xx_gmu_sptprac_is_on()
86 val = gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS); in a6xx_gmu_sptprac_is_on()
94 bool a6xx_gmu_gx_is_on(struct a6xx_gmu *gmu) in a6xx_gmu_gx_is_on() argument
98 /* This can be called from gpu state code so make sure GMU is valid */ in a6xx_gmu_gx_is_on()
99 if (!gmu->initialized) in a6xx_gmu_gx_is_on()
102 val = gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS); in a6xx_gmu_gx_is_on()
115 struct a6xx_gmu *gmu = &a6xx_gpu->gmu; in a6xx_gmu_set_freq() local
123 if (gpu_freq == gmu->freq) in a6xx_gmu_set_freq()
126 for (perf_index = 0; perf_index < gmu->nr_gpu_freqs - 1; perf_index++) in a6xx_gmu_set_freq()
127 if (gpu_freq == gmu->gpu_freqs[perf_index]) in a6xx_gmu_set_freq()
131 if (info->bcms && gmu->nr_gpu_bws > 1) { in a6xx_gmu_set_freq()
134 for (bw_index = 0; bw_index < gmu->nr_gpu_bws - 1; bw_index++) { in a6xx_gmu_set_freq()
135 if (bw == gmu->gpu_bw_table[bw_index]) in a6xx_gmu_set_freq()
154 do_div(tmp, gmu->gpu_bw_table[gmu->nr_gpu_bws - 1]); in a6xx_gmu_set_freq()
161 gmu->current_perf_index = perf_index; in a6xx_gmu_set_freq()
162 gmu->freq = gmu->gpu_freqs[perf_index]; in a6xx_gmu_set_freq()
164 trace_msm_gmu_freq_change(gmu->freq, perf_index); in a6xx_gmu_set_freq()
175 if (!gmu->legacy) { in a6xx_gmu_set_freq()
176 a6xx_hfi_set_freq(gmu, perf_index, bw_index); in a6xx_gmu_set_freq()
183 gmu_write(gmu, REG_A6XX_GMU_DCVS_ACK_OPTION, 0); in a6xx_gmu_set_freq()
185 gmu_write(gmu, REG_A6XX_GMU_DCVS_PERF_SETTING, in a6xx_gmu_set_freq()
192 gmu_write(gmu, REG_A6XX_GMU_DCVS_BW_SETTING, 0xff); in a6xx_gmu_set_freq()
194 /* Set and clear the OOB for DCVS to trigger the GMU */ in a6xx_gmu_set_freq()
195 a6xx_gmu_set_oob(gmu, GMU_OOB_DCVS_SET); in a6xx_gmu_set_freq()
196 a6xx_gmu_clear_oob(gmu, GMU_OOB_DCVS_SET); in a6xx_gmu_set_freq()
198 ret = gmu_read(gmu, REG_A6XX_GMU_DCVS_RETURN); in a6xx_gmu_set_freq()
200 dev_err(gmu->dev, "GMU set GPU frequency error: %d\n", ret); in a6xx_gmu_set_freq()
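a6xx_gmu_set_freq() above picks a perf index by scanning the GPU frequency table and defaults to the highest level when no entry matches (the loop stops at nr_gpu_freqs - 1). A standalone sketch of just that lookup, with made-up table values:

#include <stdio.h>

/* Toy frequency table; the real values come from the OPP table at probe time. */
static const unsigned long gpu_freqs[] = { 257000000, 414000000, 585000000, 850000000 };
static const int nr_gpu_freqs = 4;

/*
 * Mirror of the lookup in a6xx_gmu_set_freq(): walk the table and stop at the
 * matching entry; if nothing matches, the loop leaves perf_index at the last
 * (highest) level.
 */
static int freq_to_perf_index(unsigned long gpu_freq)
{
    int perf_index;

    for (perf_index = 0; perf_index < nr_gpu_freqs - 1; perf_index++)
        if (gpu_freq == gpu_freqs[perf_index])
            break;

    return perf_index;
}

int main(void)
{
    printf("414 MHz -> index %d\n", freq_to_perf_index(414000000));
    printf("unknown -> index %d\n", freq_to_perf_index(123));
    return 0;
}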
209 struct a6xx_gmu *gmu = &a6xx_gpu->gmu; in a6xx_gmu_get_freq() local
211 return gmu->freq; in a6xx_gmu_get_freq()
214 static bool a6xx_gmu_check_idle_level(struct a6xx_gmu *gmu) in a6xx_gmu_check_idle_level() argument
217 int local = gmu->idle_level; in a6xx_gmu_check_idle_level()
220 if (gmu->idle_level == GMU_IDLE_STATE_SPTP) in a6xx_gmu_check_idle_level()
223 val = gmu_read(gmu, REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE); in a6xx_gmu_check_idle_level()
226 if (gmu->idle_level != GMU_IDLE_STATE_IFPC || in a6xx_gmu_check_idle_level()
227 !a6xx_gmu_gx_is_on(gmu)) in a6xx_gmu_check_idle_level()
234 /* Wait for the GMU to get to its most idle state */
235 int a6xx_gmu_wait_for_idle(struct a6xx_gmu *gmu) in a6xx_gmu_wait_for_idle() argument
237 return spin_until(a6xx_gmu_check_idle_level(gmu)); in a6xx_gmu_wait_for_idle()
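a6xx_gmu_wait_for_idle() is only a spin on a6xx_gmu_check_idle_level() via spin_until(). A generic poll-until-condition helper in the same spirit; the 1 ms interval, the timeout accounting and the stub condition are assumptions for the example, not the driver's actual macro:

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

/* Generic "spin until the condition holds or the deadline passes" helper. */
static int spin_until_true(bool (*cond)(void), unsigned int timeout_ms)
{
    struct timespec ts = { .tv_nsec = 1 * 1000 * 1000 };    /* ~1 ms poll interval */
    unsigned int polls = 0;

    while (!cond()) {
        if (polls++ >= timeout_ms)
            return -1;      /* timed out; the caller treats the GMU as hung */
        nanosleep(&ts, NULL);
    }
    return 0;
}

static int calls;
static bool fake_idle_check(void)
{
    /* Pretend the GMU reaches its idle level after a few polls. */
    return ++calls > 3;
}

int main(void)
{
    printf("wait_for_idle -> %d\n", spin_until_true(fake_idle_check, 100));
    return 0;
}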
240 static int a6xx_gmu_start(struct a6xx_gmu *gmu) in a6xx_gmu_start() argument
242 struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu); in a6xx_gmu_start()
247 val = gmu_read(gmu, REG_A6XX_GMU_CM3_DTCM_START + 0xff8); in a6xx_gmu_start()
256 gmu_write(gmu, REG_A6XX_GMU_CM3_SYSRESET, 1); in a6xx_gmu_start()
262 gmu_write(gmu, REG_A7XX_GMU_GENERAL_9, 0); in a6xx_gmu_start()
264 gmu_write(gmu, REG_A6XX_GPU_GMU_CX_GMU_PWR_COL_CP_RESP, 0); in a6xx_gmu_start()
267 gmu_write(gmu, REG_A6XX_GMU_CM3_SYSRESET, 0); in a6xx_gmu_start()
269 ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_CM3_FW_INIT_RESULT, val, in a6xx_gmu_start()
273 DRM_DEV_ERROR(gmu->dev, "GMU firmware initialization timed out\n"); in a6xx_gmu_start()
278 static int a6xx_gmu_hfi_start(struct a6xx_gmu *gmu) in a6xx_gmu_hfi_start() argument
283 gmu_write(gmu, REG_A6XX_GMU_HFI_CTRL_INIT, 1); in a6xx_gmu_hfi_start()
285 ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_HFI_CTRL_STATUS, val, in a6xx_gmu_hfi_start()
288 DRM_DEV_ERROR(gmu->dev, "Unable to start the HFI queues\n"); in a6xx_gmu_hfi_start()
337 /* Trigger an OOB (out of band) request to the GMU */
338 int a6xx_gmu_set_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state) in a6xx_gmu_set_oob() argument
344 WARN_ON_ONCE(!mutex_is_locked(&gmu->lock)); in a6xx_gmu_set_oob()
349 if (gmu->legacy) { in a6xx_gmu_set_oob()
356 DRM_DEV_ERROR(gmu->dev, in a6xx_gmu_set_oob()
357 "Invalid non-legacy GMU request %s\n", in a6xx_gmu_set_oob()
364 gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET, 1 << request); in a6xx_gmu_set_oob()
367 ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO, val, in a6xx_gmu_set_oob()
371 DRM_DEV_ERROR(gmu->dev, in a6xx_gmu_set_oob()
372 "Timeout waiting for GMU OOB set %s: 0x%x\n", in a6xx_gmu_set_oob()
374 gmu_read(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO)); in a6xx_gmu_set_oob()
377 gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR, 1 << ack); in a6xx_gmu_set_oob()
382 /* Clear a pending OOB state in the GMU */
383 void a6xx_gmu_clear_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state) in a6xx_gmu_clear_oob() argument
387 WARN_ON_ONCE(!mutex_is_locked(&gmu->lock)); in a6xx_gmu_clear_oob()
392 if (gmu->legacy) in a6xx_gmu_clear_oob()
397 gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET, 1 << bit); in a6xx_gmu_clear_oob()
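a6xx_gmu_set_oob()/a6xx_gmu_clear_oob() above implement a doorbell handshake: raise a request bit in HOST2GMU_INTR_SET, poll for the matching ack bit in GMU2HOST_INTR_INFO, then clear it. A hedged standalone sketch of that flow, with fake registers and arbitrary bit numbers standing in for the real OOB states:

#include <stdint.h>
#include <stdio.h>

/* Fake doorbells standing in for HOST2GMU_INTR_SET / GMU2HOST_INTR_INFO/CLR. */
static uint32_t host2gmu_set;
static uint32_t gmu2host_info;
static uint32_t gmu2host_clr;

/* Pretend firmware: raises the ack bit once it has seen the request. */
static void fake_gmu_firmware(unsigned int ack)
{
    if (host2gmu_set)
        gmu2host_info |= 1u << ack;
}

/*
 * Shape of the OOB handshake shown above: raise the request bit, wait for the
 * ack bit, then clear the ack. The bit numbers are made up here; in the driver
 * they depend on the OOB state and on gmu->legacy.
 */
static int oob_set(unsigned int request, unsigned int ack)
{
    host2gmu_set |= 1u << request;

    fake_gmu_firmware(ack);     /* the driver polls with gmu_poll_timeout() instead */

    if (!(gmu2host_info & (1u << ack)))
        return -1;              /* timeout: the GMU never acked the request */

    gmu2host_clr |= 1u << ack;  /* clear the ack, as a6xx_gmu_set_oob() does */
    return 0;
}

int main(void)
{
    printf("oob_set -> %d\n", oob_set(4, 24));
    return 0;
}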
401 int a6xx_sptprac_enable(struct a6xx_gmu *gmu) in a6xx_sptprac_enable() argument
406 if (!gmu->legacy) in a6xx_sptprac_enable()
409 gmu_write(gmu, REG_A6XX_GMU_GX_SPTPRAC_POWER_CONTROL, 0x778000); in a6xx_sptprac_enable()
411 ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS, val, in a6xx_sptprac_enable()
415 DRM_DEV_ERROR(gmu->dev, "Unable to power on SPTPRAC: 0x%x\n", in a6xx_sptprac_enable()
416 gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS)); in a6xx_sptprac_enable()
423 void a6xx_sptprac_disable(struct a6xx_gmu *gmu) in a6xx_sptprac_disable() argument
428 if (!gmu->legacy) in a6xx_sptprac_disable()
432 gmu_rmw(gmu, REG_A6XX_GPU_CC_GX_GDSCR, 0, (1 << 11)); in a6xx_sptprac_disable()
434 gmu_write(gmu, REG_A6XX_GMU_GX_SPTPRAC_POWER_CONTROL, 0x778001); in a6xx_sptprac_disable()
436 ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS, val, in a6xx_sptprac_disable()
440 DRM_DEV_ERROR(gmu->dev, "failed to power off SPTPRAC: 0x%x\n", in a6xx_sptprac_disable()
441 gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS)); in a6xx_sptprac_disable()
444 /* Let the GMU know we are starting a boot sequence */
445 static int a6xx_gmu_gfx_rail_on(struct a6xx_gmu *gmu) in a6xx_gmu_gfx_rail_on() argument
449 /* Let the GMU know we are getting ready for boot */ in a6xx_gmu_gfx_rail_on()
450 gmu_write(gmu, REG_A6XX_GMU_BOOT_SLUMBER_OPTION, 0); in a6xx_gmu_gfx_rail_on()
453 vote = gmu->gx_arc_votes[gmu->nr_gpu_freqs - 1]; in a6xx_gmu_gfx_rail_on()
455 gmu_write(gmu, REG_A6XX_GMU_GX_VOTE_IDX, vote & 0xff); in a6xx_gmu_gfx_rail_on()
456 gmu_write(gmu, REG_A6XX_GMU_MX_VOTE_IDX, (vote >> 8) & 0xff); in a6xx_gmu_gfx_rail_on()
458 /* Let the GMU know the boot sequence has started */ in a6xx_gmu_gfx_rail_on()
459 return a6xx_gmu_set_oob(gmu, GMU_OOB_BOOT_SLUMBER); in a6xx_gmu_gfx_rail_on()
462 static void a6xx_gemnoc_workaround(struct a6xx_gmu *gmu) in a6xx_gemnoc_workaround() argument
464 struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu); in a6xx_gemnoc_workaround()
473 gmu_write(gmu, REG_A6XX_GMU_AO_AHB_FENCE_CTRL, BIT(0)); in a6xx_gemnoc_workaround()
476 /* Let the GMU know that we are about to go into slumber */
477 static int a6xx_gmu_notify_slumber(struct a6xx_gmu *gmu) in a6xx_gmu_notify_slumber() argument
481 /* Disable the power counter so the GMU isn't busy */ in a6xx_gmu_notify_slumber()
482 gmu_write(gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE, 0); in a6xx_gmu_notify_slumber()
485 if (gmu->idle_level < GMU_IDLE_STATE_SPTP) in a6xx_gmu_notify_slumber()
486 a6xx_sptprac_disable(gmu); in a6xx_gmu_notify_slumber()
488 if (!gmu->legacy) { in a6xx_gmu_notify_slumber()
489 ret = a6xx_hfi_send_prep_slumber(gmu); in a6xx_gmu_notify_slumber()
493 /* Tell the GMU to get ready to slumber */ in a6xx_gmu_notify_slumber()
494 gmu_write(gmu, REG_A6XX_GMU_BOOT_SLUMBER_OPTION, 1); in a6xx_gmu_notify_slumber()
496 ret = a6xx_gmu_set_oob(gmu, GMU_OOB_BOOT_SLUMBER); in a6xx_gmu_notify_slumber()
497 a6xx_gmu_clear_oob(gmu, GMU_OOB_BOOT_SLUMBER); in a6xx_gmu_notify_slumber()
500 /* Check to see if the GMU really did slumber */ in a6xx_gmu_notify_slumber()
501 if (gmu_read(gmu, REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE) in a6xx_gmu_notify_slumber()
503 DRM_DEV_ERROR(gmu->dev, "The GMU did not go into slumber\n"); in a6xx_gmu_notify_slumber()
509 a6xx_gemnoc_workaround(gmu); in a6xx_gmu_notify_slumber()
512 gmu_write(gmu, REG_A6XX_GMU_AO_AHB_FENCE_CTRL, 0); in a6xx_gmu_notify_slumber()
516 static int a6xx_rpmh_start(struct a6xx_gmu *gmu) in a6xx_rpmh_start() argument
521 gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, BIT(1)); in a6xx_rpmh_start()
523 ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_RSCC_CONTROL_ACK, val, in a6xx_rpmh_start()
526 DRM_DEV_ERROR(gmu->dev, "Unable to power on the GPU RSC\n"); in a6xx_rpmh_start()
530 ret = gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_SEQ_BUSY_DRV0, val, in a6xx_rpmh_start()
534 DRM_DEV_ERROR(gmu->dev, "GPU RSC sequence stuck while waking up the GPU\n"); in a6xx_rpmh_start()
538 gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 0); in a6xx_rpmh_start()
543 static void a6xx_rpmh_stop(struct a6xx_gmu *gmu) in a6xx_rpmh_stop() argument
548 gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 1); in a6xx_rpmh_stop()
550 ret = gmu_poll_timeout_rscc(gmu, REG_A6XX_GPU_RSCC_RSC_STATUS0_DRV0, in a6xx_rpmh_stop()
553 DRM_DEV_ERROR(gmu->dev, "Unable to power off the GPU RSC\n"); in a6xx_rpmh_stop()
555 gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 0); in a6xx_rpmh_stop()
566 static void a6xx_gmu_rpmh_init(struct a6xx_gmu *gmu) in a6xx_gmu_rpmh_init() argument
568 struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu); in a6xx_gmu_rpmh_init()
570 struct platform_device *pdev = to_platform_device(gmu->dev); in a6xx_gmu_rpmh_init()
597 gmu_write_rscc(gmu, REG_A6XX_GPU_RSCC_RSC_STATUS0_DRV0, BIT(24)); in a6xx_gmu_rpmh_init()
600 gmu_write_rscc(gmu, REG_A6XX_RSCC_PDC_SLAVE_ID_DRV0, 1); in a6xx_gmu_rpmh_init()
601 gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA, 0); in a6xx_gmu_rpmh_init()
602 gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR, 0); in a6xx_gmu_rpmh_init()
603 gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA + 2, 0); in a6xx_gmu_rpmh_init()
604 gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR + 2, 0); in a6xx_gmu_rpmh_init()
605 gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA + 4, in a6xx_gmu_rpmh_init()
607 gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR + 4, 0); in a6xx_gmu_rpmh_init()
608 gmu_write_rscc(gmu, REG_A6XX_RSCC_OVERRIDE_START_ADDR, 0); in a6xx_gmu_rpmh_init()
609 gmu_write_rscc(gmu, REG_A6XX_RSCC_PDC_SEQ_START_ADDR, 0x4520); in a6xx_gmu_rpmh_init()
610 gmu_write_rscc(gmu, REG_A6XX_RSCC_PDC_MATCH_VALUE_LO, 0x4510); in a6xx_gmu_rpmh_init()
611 gmu_write_rscc(gmu, REG_A6XX_RSCC_PDC_MATCH_VALUE_HI, 0x4514); in a6xx_gmu_rpmh_init()
620 gmu_write_rscc(gmu, seqmem0_drv0_reg, 0xeaaae5a0); in a6xx_gmu_rpmh_init()
621 gmu_write_rscc(gmu, seqmem0_drv0_reg + 1, 0xe1a1ebab); in a6xx_gmu_rpmh_init()
622 gmu_write_rscc(gmu, seqmem0_drv0_reg + 2, 0xa2e0a581); in a6xx_gmu_rpmh_init()
623 gmu_write_rscc(gmu, seqmem0_drv0_reg + 3, 0xecac82e2); in a6xx_gmu_rpmh_init()
624 gmu_write_rscc(gmu, seqmem0_drv0_reg + 4, 0x0020edad); in a6xx_gmu_rpmh_init()
626 gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0, 0xa7a506a0); in a6xx_gmu_rpmh_init()
627 gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 1, 0xa1e6a6e7); in a6xx_gmu_rpmh_init()
628 gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 2, 0xa2e081e1); in a6xx_gmu_rpmh_init()
629 gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 3, 0xe9a982e2); in a6xx_gmu_rpmh_init()
630 gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 4, 0x0020e8a8); in a6xx_gmu_rpmh_init()
684 a6xx_rpmh_stop(gmu); in a6xx_gmu_rpmh_init()
701 /* Set up the idle state for the GMU */
702 static void a6xx_gmu_power_config(struct a6xx_gmu *gmu) in a6xx_gmu_power_config() argument
704 struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu); in a6xx_gmu_power_config()
707 /* Disable GMU WB/RB buffer */ in a6xx_gmu_power_config()
708 gmu_write(gmu, REG_A6XX_GMU_SYS_BUS_CONFIG, 0x1); in a6xx_gmu_power_config()
709 gmu_write(gmu, REG_A6XX_GMU_ICACHE_CONFIG, 0x1); in a6xx_gmu_power_config()
710 gmu_write(gmu, REG_A6XX_GMU_DCACHE_CONFIG, 0x1); in a6xx_gmu_power_config()
716 gmu_write(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_CTRL, 0x9c40400); in a6xx_gmu_power_config()
718 switch (gmu->idle_level) { in a6xx_gmu_power_config()
720 gmu_write(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_HYST, in a6xx_gmu_power_config()
722 gmu_rmw(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_CTRL, 0, in a6xx_gmu_power_config()
727 gmu_write(gmu, REG_A6XX_GMU_PWR_COL_SPTPRAC_HYST, in a6xx_gmu_power_config()
729 gmu_rmw(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_CTRL, 0, in a6xx_gmu_power_config()
735 gmu_rmw(gmu, REG_A6XX_GMU_RPMH_CTRL, 0, in a6xx_gmu_power_config()
761 static int a6xx_gmu_fw_load(struct a6xx_gmu *gmu) in a6xx_gmu_fw_load() argument
763 struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu); in a6xx_gmu_fw_load()
776 if (gmu->legacy) { in a6xx_gmu_fw_load()
779 DRM_DEV_ERROR(gmu->dev, in a6xx_gmu_fw_load()
780 "GMU firmware is bigger than the available region\n"); in a6xx_gmu_fw_load()
784 gmu_write_bulk(gmu, REG_A6XX_GMU_CM3_ITCM_START, in a6xx_gmu_fw_load()
798 gmu_write_bulk(gmu, in a6xx_gmu_fw_load()
803 gmu_write_bulk(gmu, in a6xx_gmu_fw_load()
806 } else if (!fw_block_mem(&gmu->icache, blk) && in a6xx_gmu_fw_load()
807 !fw_block_mem(&gmu->dcache, blk) && in a6xx_gmu_fw_load()
808 !fw_block_mem(&gmu->dummy, blk)) { in a6xx_gmu_fw_load()
809 DRM_DEV_ERROR(gmu->dev, in a6xx_gmu_fw_load()
815 ver = gmu_read(gmu, REG_A6XX_GMU_CORE_FW_VERSION); in a6xx_gmu_fw_load()
816 DRM_INFO_ONCE("Loaded GMU firmware v%u.%u.%u\n", in a6xx_gmu_fw_load()
824 static int a6xx_gmu_fw_start(struct a6xx_gmu *gmu, unsigned int state) in a6xx_gmu_fw_start() argument
826 struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu); in a6xx_gmu_fw_start()
835 gmu_write(gmu, REG_A6XX_GPU_GMU_CX_GMU_CX_FALNEXT_INTF, 1); in a6xx_gmu_fw_start()
836 gmu_write(gmu, REG_A6XX_GPU_GMU_CX_GMU_CX_FAL_INTF, 1); in a6xx_gmu_fw_start()
843 gmu_write(gmu, REG_A6XX_GMU_GENERAL_7, 1); in a6xx_gmu_fw_start()
846 ret = a6xx_rpmh_start(gmu); in a6xx_gmu_fw_start()
851 "GMU firmware is not loaded\n")) in a6xx_gmu_fw_start()
854 ret = a6xx_rpmh_start(gmu); in a6xx_gmu_fw_start()
858 ret = a6xx_gmu_fw_load(gmu); in a6xx_gmu_fw_start()
864 gmu_write(gmu, REG_A6XX_GMU_CM3_FW_INIT_RESULT, 0); in a6xx_gmu_fw_start()
865 gmu_write(gmu, REG_A6XX_GMU_CM3_BOOT_CONFIG, 0x02); in a6xx_gmu_fw_start()
868 gmu_write(gmu, REG_A6XX_GMU_HFI_QTBL_ADDR, gmu->hfi.iova); in a6xx_gmu_fw_start()
869 gmu_write(gmu, REG_A6XX_GMU_HFI_QTBL_INFO, 1); in a6xx_gmu_fw_start()
879 gmu_write(gmu, REG_A6XX_GMU_AHB_FENCE_RANGE_0, in a6xx_gmu_fw_start()
888 gmu_write(gmu, REG_A6XX_GMU_CM3_CFG, 0x4052); in a6xx_gmu_fw_start()
894 * Note that the GMU has a slightly different layout for in a6xx_gmu_fw_start()
906 gmu_write(gmu, REG_A7XX_GMU_GENERAL_10, chipid); in a6xx_gmu_fw_start()
907 gmu_write(gmu, REG_A7XX_GMU_GENERAL_8, in a6xx_gmu_fw_start()
908 (gmu->log.iova & GENMASK(31, 12)) | in a6xx_gmu_fw_start()
909 ((gmu->log.size / SZ_4K - 1) & GENMASK(7, 0))); in a6xx_gmu_fw_start()
911 gmu_write(gmu, REG_A6XX_GMU_HFI_SFR_ADDR, chipid); in a6xx_gmu_fw_start()
913 gmu_write(gmu, REG_A6XX_GPU_GMU_CX_GMU_PWR_COL_CP_MSG, in a6xx_gmu_fw_start()
914 gmu->log.iova | (gmu->log.size / SZ_4K - 1)); in a6xx_gmu_fw_start()
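The A7xx branch above packs the log buffer IOVA and its size in 4K pages into a single 32-bit register value using GENMASK fields (the A6xx branch does a simpler OR). A small standalone check of that packing arithmetic; the example iova and size are assumptions, not values from real hardware:

#include <stdint.h>
#include <stdio.h>

#define SZ_4K 4096u
/* Userspace version of the kernel GENMASK(): bits h..l set. */
#define GENMASK(h, l) (((~0u) << (l)) & (~0u >> (31 - (h))))

/*
 * Pack a 4K-aligned log buffer address and its size (in 4K pages, minus one)
 * into one 32-bit value, as the A7xx branch above does.
 */
static uint32_t pack_log_reg(uint32_t iova, uint32_t size)
{
    return (iova & GENMASK(31, 12)) |
           ((size / SZ_4K - 1) & GENMASK(7, 0));
}

int main(void)
{
    /* Placeholder values; the real iova comes from the GMU address space. */
    uint32_t packed = pack_log_reg(0x60010000u, 16 * SZ_4K);

    printf("packed = 0x%08x\n", packed);    /* prints 0x6001000f */
    return 0;
}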
917 /* Set up the lowest idle level on the GMU */ in a6xx_gmu_fw_start()
918 a6xx_gmu_power_config(gmu); in a6xx_gmu_fw_start()
920 ret = a6xx_gmu_start(gmu); in a6xx_gmu_fw_start()
924 if (gmu->legacy) { in a6xx_gmu_fw_start()
925 ret = a6xx_gmu_gfx_rail_on(gmu); in a6xx_gmu_fw_start()
931 if (gmu->idle_level < GMU_IDLE_STATE_SPTP) { in a6xx_gmu_fw_start()
932 ret = a6xx_sptprac_enable(gmu); in a6xx_gmu_fw_start()
937 ret = a6xx_gmu_hfi_start(gmu); in a6xx_gmu_fw_start()
955 static void a6xx_gmu_irq_disable(struct a6xx_gmu *gmu) in a6xx_gmu_irq_disable() argument
957 disable_irq(gmu->gmu_irq); in a6xx_gmu_irq_disable()
958 disable_irq(gmu->hfi_irq); in a6xx_gmu_irq_disable()
960 gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_MASK, ~0); in a6xx_gmu_irq_disable()
961 gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_MASK, ~0); in a6xx_gmu_irq_disable()
964 static void a6xx_gmu_rpmh_off(struct a6xx_gmu *gmu) in a6xx_gmu_rpmh_off() argument
966 struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu); in a6xx_gmu_rpmh_off()
975 gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_TCS0_DRV0_STATUS + seqmem_off, in a6xx_gmu_rpmh_off()
977 gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_TCS1_DRV0_STATUS + seqmem_off, in a6xx_gmu_rpmh_off()
979 gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_TCS2_DRV0_STATUS + seqmem_off, in a6xx_gmu_rpmh_off()
981 gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_TCS3_DRV0_STATUS + seqmem_off, in a6xx_gmu_rpmh_off()
985 /* Force the GMU off in case it isn't responsive */
986 static void a6xx_gmu_force_off(struct a6xx_gmu *gmu) in a6xx_gmu_force_off() argument
988 struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu); in a6xx_gmu_force_off()
996 gmu_write(&a6xx_gpu->gmu, REG_A6XX_GMU_GMU_PWR_COL_KEEPALIVE, 0); in a6xx_gmu_force_off()
999 a6xx_hfi_stop(gmu); in a6xx_gmu_force_off()
1002 a6xx_gmu_irq_disable(gmu); in a6xx_gmu_force_off()
1004 /* Force off SPTP in case the GMU is managing it */ in a6xx_gmu_force_off()
1005 a6xx_sptprac_disable(gmu); in a6xx_gmu_force_off()
1007 a6xx_gemnoc_workaround(gmu); in a6xx_gmu_force_off()
1010 a6xx_gmu_rpmh_off(gmu); in a6xx_gmu_force_off()
1013 gmu_write(gmu, REG_A6XX_GMU_AHB_FENCE_STATUS_CLR, 0x7); in a6xx_gmu_force_off()
1014 gmu_write(gmu, REG_A6XX_GMU_AO_AHB_FENCE_CTRL, 0); in a6xx_gmu_force_off()
1019 /* Halt the GMU CM3 core */ in a6xx_gmu_force_off()
1020 gmu_write(gmu, REG_A6XX_GMU_CM3_SYSRESET, 1); in a6xx_gmu_force_off()
1028 static void a6xx_gmu_set_initial_freq(struct msm_gpu *gpu, struct a6xx_gmu *gmu) in a6xx_gmu_set_initial_freq() argument
1031 unsigned long gpu_freq = gmu->gpu_freqs[gmu->current_perf_index]; in a6xx_gmu_set_initial_freq()
1037 gmu->freq = 0; /* so a6xx_gmu_set_freq() doesn't exit early */ in a6xx_gmu_set_initial_freq()
1042 static void a6xx_gmu_set_initial_bw(struct msm_gpu *gpu, struct a6xx_gmu *gmu) in a6xx_gmu_set_initial_bw() argument
1045 unsigned long gpu_freq = gmu->gpu_freqs[gmu->current_perf_index]; in a6xx_gmu_set_initial_bw()
1059 struct a6xx_gmu *gmu = &a6xx_gpu->gmu; in a6xx_gmu_resume() local
1062 if (WARN(!gmu->initialized, "The GMU is not set up yet\n")) in a6xx_gmu_resume()
1065 gmu->hung = false; in a6xx_gmu_resume()
1068 if (!IS_ERR(gmu->qmp)) { in a6xx_gmu_resume()
1069 ret = qmp_send(gmu->qmp, "{class: gpu, res: acd, val: %d}", in a6xx_gmu_resume()
1072 dev_err(gmu->dev, "failed to send GPU ACD state\n"); in a6xx_gmu_resume()
1076 pm_runtime_get_sync(gmu->dev); in a6xx_gmu_resume()
1081 * bring down the GX after a GMU failure in a6xx_gmu_resume()
1083 if (!IS_ERR_OR_NULL(gmu->gxpd)) in a6xx_gmu_resume()
1084 pm_runtime_get_sync(gmu->gxpd); in a6xx_gmu_resume()
1086 /* Use a known rate to bring up the GMU */ in a6xx_gmu_resume()
1087 clk_set_rate(gmu->core_clk, 200000000); in a6xx_gmu_resume()
1088 clk_set_rate(gmu->hub_clk, adreno_is_a740_family(adreno_gpu) ? in a6xx_gmu_resume()
1090 ret = clk_bulk_prepare_enable(gmu->nr_clocks, gmu->clocks); in a6xx_gmu_resume()
1092 pm_runtime_put(gmu->gxpd); in a6xx_gmu_resume()
1093 pm_runtime_put(gmu->dev); in a6xx_gmu_resume()
1098 a6xx_gmu_set_initial_bw(gpu, gmu); in a6xx_gmu_resume()
1100 /* Enable the GMU interrupt */ in a6xx_gmu_resume()
1101 gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_CLR, ~0); in a6xx_gmu_resume()
1102 gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_MASK, ~A6XX_GMU_IRQ_MASK); in a6xx_gmu_resume()
1103 enable_irq(gmu->gmu_irq); in a6xx_gmu_resume()
1109 } else if (gmu->legacy) { in a6xx_gmu_resume()
1110 status = gmu_read(gmu, REG_A6XX_GMU_GENERAL_7) == 1 ? in a6xx_gmu_resume()
1120 ret = a6xx_gmu_fw_start(gmu, status); in a6xx_gmu_resume()
1124 ret = a6xx_hfi_start(gmu, status); in a6xx_gmu_resume()
1129 * Turn on the GMU firmware fault interrupt after we know the boot in a6xx_gmu_resume()
1132 gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR, ~0); in a6xx_gmu_resume()
1133 gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_MASK, ~A6XX_HFI_IRQ_MASK); in a6xx_gmu_resume()
1134 enable_irq(gmu->hfi_irq); in a6xx_gmu_resume()
1137 a6xx_gmu_set_initial_freq(gpu, gmu); in a6xx_gmu_resume()
1140 /* On failure, shut down the GMU to leave it in a good state */ in a6xx_gmu_resume()
1142 disable_irq(gmu->gmu_irq); in a6xx_gmu_resume()
1143 a6xx_rpmh_stop(gmu); in a6xx_gmu_resume()
1144 pm_runtime_put(gmu->gxpd); in a6xx_gmu_resume()
1145 pm_runtime_put(gmu->dev); in a6xx_gmu_resume()
1151 bool a6xx_gmu_isidle(struct a6xx_gmu *gmu) in a6xx_gmu_isidle() argument
1155 if (!gmu->initialized) in a6xx_gmu_isidle()
1158 reg = gmu_read(gmu, REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS); in a6xx_gmu_isidle()
1166 /* Gracefully try to shut down the GMU and by extension the GPU */
1167 static void a6xx_gmu_shutdown(struct a6xx_gmu *gmu) in a6xx_gmu_shutdown() argument
1169 struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu); in a6xx_gmu_shutdown()
1175 * GMU firmware's internal power state gets messed up if we send "prepare_slumber" hfi when in a6xx_gmu_shutdown()
1180 if (a6xx_gmu_set_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET)) in a6xx_gmu_shutdown()
1183 a6xx_gmu_clear_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET); in a6xx_gmu_shutdown()
1186 ret = a6xx_gmu_wait_for_idle(gmu); in a6xx_gmu_shutdown()
1188 /* If the GMU isn't responding assume it is hung */ in a6xx_gmu_shutdown()
1194 /* tell the GMU we want to slumber */ in a6xx_gmu_shutdown()
1195 ret = a6xx_gmu_notify_slumber(gmu); in a6xx_gmu_shutdown()
1199 ret = gmu_poll_timeout(gmu, in a6xx_gmu_shutdown()
1210 DRM_DEV_ERROR(gmu->dev, in a6xx_gmu_shutdown()
1211 "Unable to slumber GMU: status = 0%x/0%x\n", in a6xx_gmu_shutdown()
1212 gmu_read(gmu, in a6xx_gmu_shutdown()
1214 gmu_read(gmu, in a6xx_gmu_shutdown()
1218 a6xx_hfi_stop(gmu); in a6xx_gmu_shutdown()
1221 a6xx_gmu_irq_disable(gmu); in a6xx_gmu_shutdown()
1224 a6xx_rpmh_stop(gmu); in a6xx_gmu_shutdown()
1229 a6xx_gmu_force_off(gmu); in a6xx_gmu_shutdown()
1235 struct a6xx_gmu *gmu = &a6xx_gpu->gmu; in a6xx_gmu_stop() local
1238 if (!pm_runtime_active(gmu->dev)) in a6xx_gmu_stop()
1242 * Force the GMU off if we detected a hang, otherwise try to shut it in a6xx_gmu_stop()
1245 if (gmu->hung) in a6xx_gmu_stop()
1246 a6xx_gmu_force_off(gmu); in a6xx_gmu_stop()
1248 a6xx_gmu_shutdown(gmu); in a6xx_gmu_stop()
1254 * Make sure the GX domain is off before turning off the GMU (CX) in a6xx_gmu_stop()
1255 * domain. Usually the GMU does this but only if the shutdown sequence in a6xx_gmu_stop()
1258 if (!IS_ERR_OR_NULL(gmu->gxpd)) in a6xx_gmu_stop()
1259 pm_runtime_put_sync(gmu->gxpd); in a6xx_gmu_stop()
1261 clk_bulk_disable_unprepare(gmu->nr_clocks, gmu->clocks); in a6xx_gmu_stop()
1263 pm_runtime_put_sync(gmu->dev); in a6xx_gmu_stop()
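Taken together, a6xx_gmu_shutdown() and a6xx_gmu_stop() above try the polite slumber sequence first and fall back to a6xx_gmu_force_off() when the GMU is hung or stops responding. A stripped-down sketch of that control flow, with stub helpers standing in for the real calls:

#include <stdbool.h>
#include <stdio.h>

/* Stubs standing in for the real shutdown helpers above. */
static bool wait_for_idle_ok;

static int gmu_wait_for_idle(void) { return wait_for_idle_ok ? 0 : -1; }
static void gmu_notify_slumber(void) { printf("  notify slumber\n"); }
static void gmu_force_off(void) { printf("  force off\n"); }

/*
 * Control flow of the shutdown path: force off a hung GMU straight away,
 * otherwise wait for idle and only then ask it to slumber; if the idle wait
 * fails, assume the GMU is hung and force it off anyway.
 */
static void gmu_shutdown(bool hung)
{
    if (hung) {
        gmu_force_off();
        return;
    }
    if (gmu_wait_for_idle()) {
        /* GMU is not responding, assume it is hung. */
        gmu_force_off();
        return;
    }
    gmu_notify_slumber();
}

int main(void)
{
    printf("healthy GMU:\n");
    wait_for_idle_ok = true;
    gmu_shutdown(false);

    printf("hung GMU:\n");
    gmu_shutdown(true);
    return 0;
}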
1268 static void a6xx_gmu_memory_free(struct a6xx_gmu *gmu) in a6xx_gmu_memory_free() argument
1270 msm_gem_kernel_put(gmu->hfi.obj, gmu->aspace); in a6xx_gmu_memory_free()
1271 msm_gem_kernel_put(gmu->debug.obj, gmu->aspace); in a6xx_gmu_memory_free()
1272 msm_gem_kernel_put(gmu->icache.obj, gmu->aspace); in a6xx_gmu_memory_free()
1273 msm_gem_kernel_put(gmu->dcache.obj, gmu->aspace); in a6xx_gmu_memory_free()
1274 msm_gem_kernel_put(gmu->dummy.obj, gmu->aspace); in a6xx_gmu_memory_free()
1275 msm_gem_kernel_put(gmu->log.obj, gmu->aspace); in a6xx_gmu_memory_free()
1277 gmu->aspace->mmu->funcs->detach(gmu->aspace->mmu); in a6xx_gmu_memory_free()
1278 msm_gem_address_space_put(gmu->aspace); in a6xx_gmu_memory_free()
1281 static int a6xx_gmu_memory_alloc(struct a6xx_gmu *gmu, struct a6xx_gmu_bo *bo, in a6xx_gmu_memory_alloc() argument
1284 struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu); in a6xx_gmu_memory_alloc()
1292 /* no fixed address - use GMU's uncached range */ in a6xx_gmu_memory_alloc()
1307 ret = msm_gem_get_and_pin_iova_range(bo->obj, gmu->aspace, &bo->iova, in a6xx_gmu_memory_alloc()
1322 static int a6xx_gmu_memory_probe(struct a6xx_gmu *gmu) in a6xx_gmu_memory_probe() argument
1326 mmu = msm_iommu_new(gmu->dev, 0); in a6xx_gmu_memory_probe()
1332 gmu->aspace = msm_gem_address_space_create(mmu, "gmu", 0x0, 0x80000000); in a6xx_gmu_memory_probe()
1333 if (IS_ERR(gmu->aspace)) in a6xx_gmu_memory_probe()
1334 return PTR_ERR(gmu->aspace); in a6xx_gmu_memory_probe()
1355 struct a6xx_gmu *gmu) in a6xx_gmu_rpmh_bw_votes_init() argument
1374 dev_err(gmu->dev, "invalid BCM '%s' aux data size\n", in a6xx_gmu_rpmh_bw_votes_init()
1383 for (bw_index = 0; bw_index < gmu->nr_gpu_bws; bw_index++) { in a6xx_gmu_rpmh_bw_votes_init()
1384 u32 *data = gmu->gpu_ib_votes[bw_index]; in a6xx_gmu_rpmh_bw_votes_init()
1385 u32 bw = gmu->gpu_bw_table[bw_index]; in a6xx_gmu_rpmh_bw_votes_init()
1407 /* GMU on A6xx votes perfmode on all valid bandwidths */ in a6xx_gmu_rpmh_bw_votes_init()
1537 * The GMU votes with the RPMh for itself and on behalf of the GPU but we need
1540 * The GMU can also vote for DDR interconnects, use the OPP bandwidth entries
1544 static int a6xx_gmu_rpmh_votes_init(struct a6xx_gmu *gmu) in a6xx_gmu_rpmh_votes_init() argument
1546 struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu); in a6xx_gmu_rpmh_votes_init()
1553 ret = a6xx_gmu_rpmh_arc_votes_init(&gpu->pdev->dev, gmu->gx_arc_votes, in a6xx_gmu_rpmh_votes_init()
1554 gmu->gpu_freqs, gmu->nr_gpu_freqs, "gfx.lvl"); in a6xx_gmu_rpmh_votes_init()
1557 ret |= a6xx_gmu_rpmh_arc_votes_init(gmu->dev, gmu->cx_arc_votes, in a6xx_gmu_rpmh_votes_init()
1558 gmu->gmu_freqs, gmu->nr_gmu_freqs, "cx.lvl"); in a6xx_gmu_rpmh_votes_init()
1561 if (info->bcms && gmu->nr_gpu_bws > 1) in a6xx_gmu_rpmh_votes_init()
1562 ret |= a6xx_gmu_rpmh_bw_votes_init(adreno_gpu, info, gmu); in a6xx_gmu_rpmh_votes_init()
1581 "The GMU frequency table is being truncated\n")) in a6xx_gmu_build_freq_table()
1613 "The GMU bandwidth table is being truncated\n")) in a6xx_gmu_build_bw_table()
1631 static int a6xx_gmu_pwrlevels_probe(struct a6xx_gmu *gmu) in a6xx_gmu_pwrlevels_probe() argument
1633 struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu); in a6xx_gmu_pwrlevels_probe()
1641 * The GMU handles its own frequency switching so build a list of in a6xx_gmu_pwrlevels_probe()
1644 ret = devm_pm_opp_of_add_table(gmu->dev); in a6xx_gmu_pwrlevels_probe()
1646 DRM_DEV_ERROR(gmu->dev, "Unable to set the OPP table for the GMU\n"); in a6xx_gmu_pwrlevels_probe()
1650 gmu->nr_gmu_freqs = a6xx_gmu_build_freq_table(gmu->dev, in a6xx_gmu_pwrlevels_probe()
1651 gmu->gmu_freqs, ARRAY_SIZE(gmu->gmu_freqs)); in a6xx_gmu_pwrlevels_probe()
1654 * The GMU also handles GPU frequency switching so build a list in a6xx_gmu_pwrlevels_probe()
1657 gmu->nr_gpu_freqs = a6xx_gmu_build_freq_table(&gpu->pdev->dev, in a6xx_gmu_pwrlevels_probe()
1658 gmu->gpu_freqs, ARRAY_SIZE(gmu->gpu_freqs)); in a6xx_gmu_pwrlevels_probe()
1660 gmu->current_perf_index = gmu->nr_gpu_freqs - 1; in a6xx_gmu_pwrlevels_probe()
1663 * The GMU also handles GPU Interconnect Votes so build a list in a6xx_gmu_pwrlevels_probe()
1667 gmu->nr_gpu_bws = a6xx_gmu_build_bw_table(&gpu->pdev->dev, in a6xx_gmu_pwrlevels_probe()
1668 gmu->gpu_bw_table, ARRAY_SIZE(gmu->gpu_bw_table)); in a6xx_gmu_pwrlevels_probe()
1670 /* Build the list of RPMh votes that we'll send to the GMU */ in a6xx_gmu_pwrlevels_probe()
1671 return a6xx_gmu_rpmh_votes_init(gmu); in a6xx_gmu_pwrlevels_probe()
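a6xx_gmu_build_freq_table(), whose truncation warning appears earlier in the listing, copies the available OPP rates into a fixed-size array and warns when there are more rates than slots. A standalone approximation of that behaviour; the array size and rates below are placeholders:

#include <stdio.h>

#define MAX_GPU_FREQS 16    /* assumed size of the fixed gpu_freqs[] array */

/*
 * Sketch of what a6xx_gmu_build_freq_table() does: copy the available rates
 * into a fixed-size table and warn if the source list had to be truncated.
 */
static int build_freq_table(const unsigned long *src, int nr_src,
                            unsigned long *dst, int dst_size)
{
    int i, count = nr_src;

    if (count > dst_size) {
        fprintf(stderr, "The GMU frequency table is being truncated\n");
        count = dst_size;
    }

    for (i = 0; i < count; i++)
        dst[i] = src[i];

    return count;
}

int main(void)
{
    const unsigned long opp_rates[] = { 257000000, 414000000, 585000000 };
    unsigned long gpu_freqs[MAX_GPU_FREQS];
    int nr = build_freq_table(opp_rates, 3, gpu_freqs, MAX_GPU_FREQS);

    printf("nr_gpu_freqs = %d, highest = %lu\n", nr, gpu_freqs[nr - 1]);
    return 0;
}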
1674 static int a6xx_gmu_clocks_probe(struct a6xx_gmu *gmu) in a6xx_gmu_clocks_probe() argument
1676 int ret = devm_clk_bulk_get_all(gmu->dev, &gmu->clocks); in a6xx_gmu_clocks_probe()
1681 gmu->nr_clocks = ret; in a6xx_gmu_clocks_probe()
1683 gmu->core_clk = msm_clk_bulk_get_clock(gmu->clocks, in a6xx_gmu_clocks_probe()
1684 gmu->nr_clocks, "gmu"); in a6xx_gmu_clocks_probe()
1686 gmu->hub_clk = msm_clk_bulk_get_clock(gmu->clocks, in a6xx_gmu_clocks_probe()
1687 gmu->nr_clocks, "hub"); in a6xx_gmu_clocks_probe()
1713 static int a6xx_gmu_get_irq(struct a6xx_gmu *gmu, struct platform_device *pdev, in a6xx_gmu_get_irq() argument
1720 ret = request_irq(irq, handler, IRQF_TRIGGER_HIGH | IRQF_NO_AUTOEN, name, gmu); in a6xx_gmu_get_irq()
1733 struct a6xx_gmu *gmu = &a6xx_gpu->gmu; in a6xx_gmu_remove() local
1734 struct platform_device *pdev = to_platform_device(gmu->dev); in a6xx_gmu_remove()
1736 mutex_lock(&gmu->lock); in a6xx_gmu_remove()
1737 if (!gmu->initialized) { in a6xx_gmu_remove()
1738 mutex_unlock(&gmu->lock); in a6xx_gmu_remove()
1742 gmu->initialized = false; in a6xx_gmu_remove()
1744 mutex_unlock(&gmu->lock); in a6xx_gmu_remove()
1746 pm_runtime_force_suspend(gmu->dev); in a6xx_gmu_remove()
1749 * Since cxpd is a virt device, the devlink with gmu-dev will be removed in a6xx_gmu_remove()
1752 dev_pm_domain_detach(gmu->cxpd, false); in a6xx_gmu_remove()
1754 if (!IS_ERR_OR_NULL(gmu->gxpd)) { in a6xx_gmu_remove()
1755 pm_runtime_disable(gmu->gxpd); in a6xx_gmu_remove()
1756 dev_pm_domain_detach(gmu->gxpd, false); in a6xx_gmu_remove()
1759 if (!IS_ERR_OR_NULL(gmu->qmp)) in a6xx_gmu_remove()
1760 qmp_put(gmu->qmp); in a6xx_gmu_remove()
1762 iounmap(gmu->mmio); in a6xx_gmu_remove()
1764 iounmap(gmu->rscc); in a6xx_gmu_remove()
1765 gmu->mmio = NULL; in a6xx_gmu_remove()
1766 gmu->rscc = NULL; in a6xx_gmu_remove()
1769 a6xx_gmu_memory_free(gmu); in a6xx_gmu_remove()
1771 free_irq(gmu->gmu_irq, gmu); in a6xx_gmu_remove()
1772 free_irq(gmu->hfi_irq, gmu); in a6xx_gmu_remove()
1776 put_device(gmu->dev); in a6xx_gmu_remove()
1782 struct a6xx_gmu *gmu = container_of(nb, struct a6xx_gmu, pd_nb); in cxpd_notifier_cb() local
1785 complete_all(&gmu->pd_gate); in cxpd_notifier_cb()
1793 struct a6xx_gmu *gmu = &a6xx_gpu->gmu; in a6xx_gmu_wrapper_init() local
1799 gmu->dev = &pdev->dev; in a6xx_gmu_wrapper_init()
1801 ret = of_dma_configure(gmu->dev, node, true); in a6xx_gmu_wrapper_init()
1805 pm_runtime_enable(gmu->dev); in a6xx_gmu_wrapper_init()
1808 gmu->legacy = true; in a6xx_gmu_wrapper_init()
1810 /* Map the GMU registers */ in a6xx_gmu_wrapper_init()
1811 gmu->mmio = a6xx_gmu_get_mmio(pdev, "gmu"); in a6xx_gmu_wrapper_init()
1812 if (IS_ERR(gmu->mmio)) { in a6xx_gmu_wrapper_init()
1813 ret = PTR_ERR(gmu->mmio); in a6xx_gmu_wrapper_init()
1817 gmu->cxpd = dev_pm_domain_attach_by_name(gmu->dev, "cx"); in a6xx_gmu_wrapper_init()
1818 if (IS_ERR(gmu->cxpd)) { in a6xx_gmu_wrapper_init()
1819 ret = PTR_ERR(gmu->cxpd); in a6xx_gmu_wrapper_init()
1823 if (!device_link_add(gmu->dev, gmu->cxpd, DL_FLAG_PM_RUNTIME)) { in a6xx_gmu_wrapper_init()
1828 init_completion(&gmu->pd_gate); in a6xx_gmu_wrapper_init()
1829 complete_all(&gmu->pd_gate); in a6xx_gmu_wrapper_init()
1830 gmu->pd_nb.notifier_call = cxpd_notifier_cb; in a6xx_gmu_wrapper_init()
1833 gmu->gxpd = dev_pm_domain_attach_by_name(gmu->dev, "gx"); in a6xx_gmu_wrapper_init()
1834 if (IS_ERR(gmu->gxpd)) { in a6xx_gmu_wrapper_init()
1835 ret = PTR_ERR(gmu->gxpd); in a6xx_gmu_wrapper_init()
1839 gmu->initialized = true; in a6xx_gmu_wrapper_init()
1844 dev_pm_domain_detach(gmu->cxpd, false); in a6xx_gmu_wrapper_init()
1847 iounmap(gmu->mmio); in a6xx_gmu_wrapper_init()
1850 put_device(gmu->dev); in a6xx_gmu_wrapper_init()
1858 struct a6xx_gmu *gmu = &a6xx_gpu->gmu; in a6xx_gmu_init() local
1866 gmu->dev = &pdev->dev; in a6xx_gmu_init()
1868 ret = of_dma_configure(gmu->dev, node, true); in a6xx_gmu_init()
1873 gmu->idle_level = GMU_IDLE_STATE_ACTIVE; in a6xx_gmu_init()
1875 pm_runtime_enable(gmu->dev); in a6xx_gmu_init()
1878 ret = a6xx_gmu_clocks_probe(gmu); in a6xx_gmu_init()
1882 ret = a6xx_gmu_memory_probe(gmu); in a6xx_gmu_init()
1887 /* A660 now requires handling "prealloc requests" in GMU firmware in a6xx_gmu_init()
1893 gmu->dummy.size = SZ_4K; in a6xx_gmu_init()
1896 ret = a6xx_gmu_memory_alloc(gmu, &gmu->debug, SZ_4K * 7, in a6xx_gmu_init()
1901 gmu->dummy.size = SZ_8K; in a6xx_gmu_init()
1904 /* Allocate memory for the GMU dummy page */ in a6xx_gmu_init()
1905 ret = a6xx_gmu_memory_alloc(gmu, &gmu->dummy, gmu->dummy.size, in a6xx_gmu_init()
1913 ret = a6xx_gmu_memory_alloc(gmu, &gmu->icache, in a6xx_gmu_init()
1924 ret = a6xx_gmu_memory_alloc(gmu, &gmu->icache, in a6xx_gmu_init()
1929 ret = a6xx_gmu_memory_alloc(gmu, &gmu->dcache, in a6xx_gmu_init()
1935 gmu->legacy = true; in a6xx_gmu_init()
1937 /* Allocate memory for the GMU debug region */ in a6xx_gmu_init()
1938 ret = a6xx_gmu_memory_alloc(gmu, &gmu->debug, SZ_16K, 0, "debug"); in a6xx_gmu_init()
1943 /* Allocate memory for the GMU log region */ in a6xx_gmu_init()
1944 ret = a6xx_gmu_memory_alloc(gmu, &gmu->log, SZ_16K, 0, "log"); in a6xx_gmu_init()
1949 ret = a6xx_gmu_memory_alloc(gmu, &gmu->hfi, SZ_16K, 0, "hfi"); in a6xx_gmu_init()
1953 /* Map the GMU registers */ in a6xx_gmu_init()
1954 gmu->mmio = a6xx_gmu_get_mmio(pdev, "gmu"); in a6xx_gmu_init()
1955 if (IS_ERR(gmu->mmio)) { in a6xx_gmu_init()
1956 ret = PTR_ERR(gmu->mmio); in a6xx_gmu_init()
1962 gmu->rscc = a6xx_gmu_get_mmio(pdev, "rscc"); in a6xx_gmu_init()
1963 if (IS_ERR(gmu->rscc)) { in a6xx_gmu_init()
1968 gmu->rscc = gmu->mmio + 0x23000; in a6xx_gmu_init()
1971 /* Get the HFI and GMU interrupts */ in a6xx_gmu_init()
1972 gmu->hfi_irq = a6xx_gmu_get_irq(gmu, pdev, "hfi", a6xx_hfi_irq); in a6xx_gmu_init()
1973 gmu->gmu_irq = a6xx_gmu_get_irq(gmu, pdev, "gmu", a6xx_gmu_irq); in a6xx_gmu_init()
1975 if (gmu->hfi_irq < 0 || gmu->gmu_irq < 0) { in a6xx_gmu_init()
1980 gmu->cxpd = dev_pm_domain_attach_by_name(gmu->dev, "cx"); in a6xx_gmu_init()
1981 if (IS_ERR(gmu->cxpd)) { in a6xx_gmu_init()
1982 ret = PTR_ERR(gmu->cxpd); in a6xx_gmu_init()
1986 link = device_link_add(gmu->dev, gmu->cxpd, DL_FLAG_PM_RUNTIME); in a6xx_gmu_init()
1992 gmu->qmp = qmp_get(gmu->dev); in a6xx_gmu_init()
1993 if (IS_ERR(gmu->qmp) && adreno_is_a7xx(adreno_gpu)) { in a6xx_gmu_init()
1994 ret = PTR_ERR(gmu->qmp); in a6xx_gmu_init()
1998 init_completion(&gmu->pd_gate); in a6xx_gmu_init()
1999 complete_all(&gmu->pd_gate); in a6xx_gmu_init()
2000 gmu->pd_nb.notifier_call = cxpd_notifier_cb; in a6xx_gmu_init()
2003 * Get a link to the GX power domain to reset the GPU in case of GMU in a6xx_gmu_init()
2006 gmu->gxpd = dev_pm_domain_attach_by_name(gmu->dev, "gx"); in a6xx_gmu_init()
2008 /* Get the power levels for the GMU and GPU */ in a6xx_gmu_init()
2009 a6xx_gmu_pwrlevels_probe(gmu); in a6xx_gmu_init()
2012 a6xx_hfi_init(gmu); in a6xx_gmu_init()
2015 a6xx_gmu_rpmh_init(gmu); in a6xx_gmu_init()
2017 gmu->initialized = true; in a6xx_gmu_init()
2025 dev_pm_domain_detach(gmu->cxpd, false); in a6xx_gmu_init()
2028 iounmap(gmu->mmio); in a6xx_gmu_init()
2030 iounmap(gmu->rscc); in a6xx_gmu_init()
2031 free_irq(gmu->gmu_irq, gmu); in a6xx_gmu_init()
2032 free_irq(gmu->hfi_irq, gmu); in a6xx_gmu_init()
2035 a6xx_gmu_memory_free(gmu); in a6xx_gmu_init()
2038 put_device(gmu->dev); in a6xx_gmu_init()