
37 #include <linux/pci-p2pdma.h>
38 #include <linux/apple-gmux.h>
87 #include <asm/intel-family.h>
100 #define AMDGPU_RETRY_SRIOV_RESET(r) ((r) == -EBUSY || (r) == -ETIMEDOUT || (r) == -EINVAL)
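AMDGPU_RETRY_SRIOV_RESET() classifies a reset error as transient. A hedged usage sketch of a bounded retry loop built on it; AMDGPU_MAX_RETRY and amdgpu_do_sriov_reset() are illustrative names, not taken from this listing:

	int i, r;

	for (i = 0; i < AMDGPU_MAX_RETRY; i++) {
		r = amdgpu_do_sriov_reset(adev);	/* hypothetical reset callee */
		if (!AMDGPU_RETRY_SRIOV_RESET(r))
			break;				/* success or a non-retryable error */
	}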
148 #define AMDGPU_IP_BLK_MASK_ALL GENMASK(AMD_IP_BLOCK_TYPE_NUM - 1, 0)
180 return (adev->init_lvl->hwini_ip_block_mask & (1U << block)) != 0; in amdgpu_ip_member_of_hwini()
188 adev->init_lvl = &amdgpu_init_minimal_xgmi; in amdgpu_set_init_level()
191 adev->init_lvl = &amdgpu_init_recovery; in amdgpu_set_init_level()
196 adev->init_lvl = &amdgpu_init_default; in amdgpu_set_init_level()
258 return -EINVAL; in amdgpu_sysfs_reg_state_get()
274 ret = sysfs_create_bin_file(&adev->dev->kobj, &bin_attr_reg_state); in amdgpu_reg_state_sysfs_init()
283 sysfs_remove_bin_file(&adev->dev->kobj, &bin_attr_reg_state); in amdgpu_reg_state_sysfs_fini()
290 if (ip_block->version->funcs->suspend) { in amdgpu_ip_block_suspend()
291 r = ip_block->version->funcs->suspend(ip_block); in amdgpu_ip_block_suspend()
293 dev_err(ip_block->adev->dev, in amdgpu_ip_block_suspend()
295 ip_block->version->funcs->name, r); in amdgpu_ip_block_suspend()
300 ip_block->status.hw = false; in amdgpu_ip_block_suspend()
308 if (ip_block->version->funcs->resume) { in amdgpu_ip_block_resume()
309 r = ip_block->version->funcs->resume(ip_block); in amdgpu_ip_block_resume()
311 dev_err(ip_block->adev->dev, in amdgpu_ip_block_resume()
313 ip_block->version->funcs->name, r); in amdgpu_ip_block_resume()
318 ip_block->status.hw = true; in amdgpu_ip_block_resume()
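Both helpers above share one guard-call-log shape: invoke the optional per-IP callback, log and bail on failure, then record the new hardware state. A minimal sketch of that common pattern, assuming only the structures already visible here:

	static int ip_block_call(struct amdgpu_ip_block *ip_block,
				 int (*fn)(struct amdgpu_ip_block *),
				 const char *what, bool hw_after)
	{
		int r;

		if (fn) {
			r = fn(ip_block);
			if (r) {
				dev_err(ip_block->adev->dev,
					"%s of IP block <%s> failed %d\n", what,
					ip_block->version->funcs->name, r);
				return r;
			}
		}
		ip_block->status.hw = hw_after;	/* false after suspend, true after resume */
		return 0;
	}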
332  * - "cem" - PCIe CEM card
333 * - "oam" - Open Compute Accelerator Module
334 * - "unknown" - Not known
347 if (adev->smuio.funcs && adev->smuio.funcs->get_pkg_type) in amdgpu_device_get_board_info()
348 pkg_type = adev->smuio.funcs->get_pkg_type(adev); in amdgpu_device_get_board_info()
379 if (adev->flags & AMD_IS_APU) in amdgpu_board_attrs_is_visible()
382 return attr->mode; in amdgpu_board_attrs_is_visible()
394 * amdgpu_device_supports_px - Is the device a dGPU with ATPX power control
405 if ((adev->flags & AMD_IS_PX) && !amdgpu_is_atpx_hybrid()) in amdgpu_device_supports_px()
411 * amdgpu_device_supports_boco - Is the device a dGPU with ACPI power resources
425 if (adev->has_pr3 || in amdgpu_device_supports_boco()
426 ((adev->flags & AMD_IS_PX) && amdgpu_is_atpx_hybrid())) in amdgpu_device_supports_boco()
432 * amdgpu_device_supports_baco - Does the device support BACO
455 adev->pm.rpm_mode = AMDGPU_RUNPM_NONE; in amdgpu_device_detect_runtime_pm_mode()
461 adev->pm.rpm_mode = AMDGPU_RUNPM_BAMACO; in amdgpu_device_detect_runtime_pm_mode()
462 dev_info(adev->dev, "Forcing BAMACO for runtime pm\n"); in amdgpu_device_detect_runtime_pm_mode()
464 adev->pm.rpm_mode = AMDGPU_RUNPM_BACO; in amdgpu_device_detect_runtime_pm_mode()
465 dev_info(adev->dev, "Requested mode BAMACO not available, falling back to BACO\n"); in amdgpu_device_detect_runtime_pm_mode()
470 adev->pm.rpm_mode = AMDGPU_RUNPM_BACO; in amdgpu_device_detect_runtime_pm_mode()
471 dev_info(adev->dev, "Forcing BACO for runtime pm\n"); in amdgpu_device_detect_runtime_pm_mode()
474 case -1: in amdgpu_device_detect_runtime_pm_mode()
475 case -2: in amdgpu_device_detect_runtime_pm_mode()
477 adev->pm.rpm_mode = AMDGPU_RUNPM_PX; in amdgpu_device_detect_runtime_pm_mode()
478 dev_info(adev->dev, "Using ATPX for runtime pm\n"); in amdgpu_device_detect_runtime_pm_mode()
480 adev->pm.rpm_mode = AMDGPU_RUNPM_BOCO; in amdgpu_device_detect_runtime_pm_mode()
481 dev_info(adev->dev, "Using BOCO for runtime pm\n"); in amdgpu_device_detect_runtime_pm_mode()
486 switch (adev->asic_type) { in amdgpu_device_detect_runtime_pm_mode()
493 if (!adev->gmc.noretry) in amdgpu_device_detect_runtime_pm_mode()
494 adev->pm.rpm_mode = AMDGPU_RUNPM_BACO; in amdgpu_device_detect_runtime_pm_mode()
498 adev->pm.rpm_mode = AMDGPU_RUNPM_BACO; in amdgpu_device_detect_runtime_pm_mode()
502 if (adev->pm.rpm_mode == AMDGPU_RUNPM_BACO) { in amdgpu_device_detect_runtime_pm_mode()
504 adev->pm.rpm_mode = AMDGPU_RUNPM_BAMACO; in amdgpu_device_detect_runtime_pm_mode()
505 dev_info(adev->dev, "Using BAMACO for runtime pm\n"); in amdgpu_device_detect_runtime_pm_mode()
507 dev_info(adev->dev, "Using BACO for runtime pm\n"); in amdgpu_device_detect_runtime_pm_mode()
513 dev_info(adev->dev, "runtime pm is manually disabled\n"); in amdgpu_device_detect_runtime_pm_mode()
520 if (adev->pm.rpm_mode == AMDGPU_RUNPM_NONE) in amdgpu_device_detect_runtime_pm_mode()
521 dev_info(adev->dev, "Runtime PM not available\n"); in amdgpu_device_detect_runtime_pm_mode()
524 * amdgpu_device_supports_smart_shift - Is the device dGPU with
543 * amdgpu_device_mm_access - access vram by MM_INDEX/MM_DATA
549 * @write: true - write to vram, otherwise - read from vram
565 spin_lock_irqsave(&adev->mmio_idx_lock, flags); in amdgpu_device_mm_access()
580 spin_unlock_irqrestore(&adev->mmio_idx_lock, flags); in amdgpu_device_mm_access()
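The elided body drives the MM_INDEX/MM_DATA window: the VRAM address is latched into the index registers, then each dword moves through the data register while mmio_idx_lock is held. A hedged reconstruction of a single-word read, assuming the usual amdgpu register names:

	spin_lock_irqsave(&adev->mmio_idx_lock, flags);
	WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)pos) | 0x80000000);	/* low address bits + aperture bit */
	WREG32_NO_KIQ(mmMM_INDEX_HI, pos >> 31);			/* high address bits */
	value = RREG32_NO_KIQ(mmMM_DATA);
	spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);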
585 * amdgpu_device_aper_access - access vram by vram aperture
591 * @write: true - write to vram, otherwise - read from vram
603 if (!adev->mman.aper_base_kaddr) in amdgpu_device_aper_access()
606 last = min(pos + size, adev->gmc.visible_vram_size); in amdgpu_device_aper_access()
608 addr = adev->mman.aper_base_kaddr + pos; in amdgpu_device_aper_access()
609 count = last - pos; in amdgpu_device_aper_access()
636 * amdgpu_device_vram_access - read/write a buffer in vram
642 * @write: true - write to vram, otherwise - read from vram
651 size -= count; in amdgpu_device_vram_access()
667 if (adev->no_hw_access) in amdgpu_device_skip_hw_access()
683 if (down_read_trylock(&adev->reset_domain->sem)) in amdgpu_device_skip_hw_access()
684 up_read(&adev->reset_domain->sem); in amdgpu_device_skip_hw_access()
686 lockdep_assert_held(&adev->reset_domain->sem); in amdgpu_device_skip_hw_access()
693 * amdgpu_device_rreg - read a memory mapped IO or indirect register
709 if ((reg * 4) < adev->rmmio_size) { in amdgpu_device_rreg()
712 down_read_trylock(&adev->reset_domain->sem)) { in amdgpu_device_rreg()
714 up_read(&adev->reset_domain->sem); in amdgpu_device_rreg()
716 ret = readl(((void __iomem *)adev->rmmio) + (reg * 4)); in amdgpu_device_rreg()
719 ret = adev->pcie_rreg(adev, reg * 4); in amdgpu_device_rreg()
722 trace_amdgpu_device_rreg(adev->pdev->device, reg, ret); in amdgpu_device_rreg()
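Condensed, and ignoring the SR-IOV/KIQ branch elided above, the dispatch is: registers inside the mapped MMIO aperture are read directly with readl(), anything beyond rmmio_size goes through the indexed PCIe path:

	if ((reg * 4) < adev->rmmio_size)
		ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
	else
		ret = adev->pcie_rreg(adev, reg * 4);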
733 * amdgpu_mm_rreg8 - read a memory mapped IO register
745 if (offset < adev->rmmio_size) in amdgpu_mm_rreg8()
746 return (readb(adev->rmmio + offset)); in amdgpu_mm_rreg8()
752 * amdgpu_device_xcc_rreg - read a memory mapped IO or indirect register with specific XCC
770 if ((reg * 4) < adev->rmmio_size) { in amdgpu_device_xcc_rreg()
773 adev->gfx.rlc.rlcg_reg_access_supported && in amdgpu_device_xcc_rreg()
780 down_read_trylock(&adev->reset_domain->sem)) { in amdgpu_device_xcc_rreg()
782 up_read(&adev->reset_domain->sem); in amdgpu_device_xcc_rreg()
784 ret = readl(((void __iomem *)adev->rmmio) + (reg * 4)); in amdgpu_device_xcc_rreg()
787 ret = adev->pcie_rreg(adev, reg * 4); in amdgpu_device_xcc_rreg()
800  * amdgpu_mm_wreg8 - write to a memory mapped IO register
813 if (offset < adev->rmmio_size) in amdgpu_mm_wreg8()
814 writeb(value, adev->rmmio + offset); in amdgpu_mm_wreg8()
820 * amdgpu_device_wreg - write to a memory mapped IO or indirect register
836 if ((reg * 4) < adev->rmmio_size) { in amdgpu_device_wreg()
839 down_read_trylock(&adev->reset_domain->sem)) { in amdgpu_device_wreg()
841 up_read(&adev->reset_domain->sem); in amdgpu_device_wreg()
843 writel(v, ((void __iomem *)adev->rmmio) + (reg * 4)); in amdgpu_device_wreg()
846 adev->pcie_wreg(adev, reg * 4, v); in amdgpu_device_wreg()
849 trace_amdgpu_device_wreg(adev->pdev->device, reg, v); in amdgpu_device_wreg()
853  * amdgpu_mm_wreg_mmio_rlc - write register either with direct/indirect mmio or with RLC path if in range
870 adev->gfx.rlc.funcs && in amdgpu_mm_wreg_mmio_rlc()
871 adev->gfx.rlc.funcs->is_rlcg_access_range) { in amdgpu_mm_wreg_mmio_rlc()
872 if (adev->gfx.rlc.funcs->is_rlcg_access_range(adev, reg)) in amdgpu_mm_wreg_mmio_rlc()
874 } else if ((reg * 4) >= adev->rmmio_size) { in amdgpu_mm_wreg_mmio_rlc()
875 adev->pcie_wreg(adev, reg * 4, v); in amdgpu_mm_wreg_mmio_rlc()
877 writel(v, ((void __iomem *)adev->rmmio) + (reg * 4)); in amdgpu_mm_wreg_mmio_rlc()
882 * amdgpu_device_xcc_wreg - write to a memory mapped IO or indirect register with specific XCC
901 if ((reg * 4) < adev->rmmio_size) { in amdgpu_device_xcc_wreg()
904 adev->gfx.rlc.rlcg_reg_access_supported && in amdgpu_device_xcc_wreg()
911 down_read_trylock(&adev->reset_domain->sem)) { in amdgpu_device_xcc_wreg()
913 up_read(&adev->reset_domain->sem); in amdgpu_device_xcc_wreg()
915 writel(v, ((void __iomem *)adev->rmmio) + (reg * 4)); in amdgpu_device_xcc_wreg()
918 adev->pcie_wreg(adev, reg * 4, v); in amdgpu_device_xcc_wreg()
923 * amdgpu_device_indirect_rreg - read an indirect register
938 pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev); in amdgpu_device_indirect_rreg()
939 pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev); in amdgpu_device_indirect_rreg()
941 spin_lock_irqsave(&adev->pcie_idx_lock, flags); in amdgpu_device_indirect_rreg()
942 pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4; in amdgpu_device_indirect_rreg()
943 pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4; in amdgpu_device_indirect_rreg()
948 spin_unlock_irqrestore(&adev->pcie_idx_lock, flags); in amdgpu_device_indirect_rreg()
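Between the offset setup and the unlock, the elided lines follow the standard index/data sequence; a hedged reconstruction:

	writel(reg_addr, pcie_index_offset);
	readl(pcie_index_offset);		/* read back to post the index write */
	r = readl(pcie_data_offset);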
962 if (unlikely(!adev->nbio.funcs)) { in amdgpu_device_indirect_rreg_ext()
966 pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev); in amdgpu_device_indirect_rreg_ext()
967 pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev); in amdgpu_device_indirect_rreg_ext()
971 if (unlikely(!adev->nbio.funcs)) in amdgpu_device_indirect_rreg_ext()
974 pcie_index_hi = adev->nbio.funcs->get_pcie_index_hi_offset(adev); in amdgpu_device_indirect_rreg_ext()
979 spin_lock_irqsave(&adev->pcie_idx_lock, flags); in amdgpu_device_indirect_rreg_ext()
980 pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4; in amdgpu_device_indirect_rreg_ext()
981 pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4; in amdgpu_device_indirect_rreg_ext()
983 pcie_index_hi_offset = (void __iomem *)adev->rmmio + in amdgpu_device_indirect_rreg_ext()
1000 spin_unlock_irqrestore(&adev->pcie_idx_lock, flags); in amdgpu_device_indirect_rreg_ext()
1006  * amdgpu_device_indirect_rreg64 - read a 64-bit indirect register
1021 pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev); in amdgpu_device_indirect_rreg64()
1022 pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev); in amdgpu_device_indirect_rreg64()
1024 spin_lock_irqsave(&adev->pcie_idx_lock, flags); in amdgpu_device_indirect_rreg64()
1025 pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4; in amdgpu_device_indirect_rreg64()
1026 pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4; in amdgpu_device_indirect_rreg64()
1036 spin_unlock_irqrestore(&adev->pcie_idx_lock, flags); in amdgpu_device_indirect_rreg64()
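The 64-bit variant runs the same index/data sequence twice and composes the result from two 32-bit halves; a hedged reconstruction of the elided body:

	/* read low 32 bits */
	writel(reg_addr, pcie_index_offset);
	readl(pcie_index_offset);
	r = readl(pcie_data_offset);
	/* read high 32 bits */
	writel(reg_addr + 4, pcie_index_offset);
	readl(pcie_index_offset);
	r |= ((u64)readl(pcie_data_offset) << 32);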
1051 pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev); in amdgpu_device_indirect_rreg64_ext()
1052 pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev); in amdgpu_device_indirect_rreg64_ext()
1053 if ((reg_addr >> 32) && (adev->nbio.funcs->get_pcie_index_hi_offset)) in amdgpu_device_indirect_rreg64_ext()
1054 pcie_index_hi = adev->nbio.funcs->get_pcie_index_hi_offset(adev); in amdgpu_device_indirect_rreg64_ext()
1056 spin_lock_irqsave(&adev->pcie_idx_lock, flags); in amdgpu_device_indirect_rreg64_ext()
1057 pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4; in amdgpu_device_indirect_rreg64_ext()
1058 pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4; in amdgpu_device_indirect_rreg64_ext()
1060 pcie_index_hi_offset = (void __iomem *)adev->rmmio + in amdgpu_device_indirect_rreg64_ext()
1086 spin_unlock_irqrestore(&adev->pcie_idx_lock, flags); in amdgpu_device_indirect_rreg64_ext()
1092  * amdgpu_device_indirect_wreg - write an indirect register
1106 pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev); in amdgpu_device_indirect_wreg()
1107 pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev); in amdgpu_device_indirect_wreg()
1109 spin_lock_irqsave(&adev->pcie_idx_lock, flags); in amdgpu_device_indirect_wreg()
1110 pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4; in amdgpu_device_indirect_wreg()
1111 pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4; in amdgpu_device_indirect_wreg()
1117 spin_unlock_irqrestore(&adev->pcie_idx_lock, flags); in amdgpu_device_indirect_wreg()
1128 pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev); in amdgpu_device_indirect_wreg_ext()
1129 pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev); in amdgpu_device_indirect_wreg_ext()
1130 if ((reg_addr >> 32) && (adev->nbio.funcs->get_pcie_index_hi_offset)) in amdgpu_device_indirect_wreg_ext()
1131 pcie_index_hi = adev->nbio.funcs->get_pcie_index_hi_offset(adev); in amdgpu_device_indirect_wreg_ext()
1135 spin_lock_irqsave(&adev->pcie_idx_lock, flags); in amdgpu_device_indirect_wreg_ext()
1136 pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4; in amdgpu_device_indirect_wreg_ext()
1137 pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4; in amdgpu_device_indirect_wreg_ext()
1139 pcie_index_hi_offset = (void __iomem *)adev->rmmio + in amdgpu_device_indirect_wreg_ext()
1157 spin_unlock_irqrestore(&adev->pcie_idx_lock, flags); in amdgpu_device_indirect_wreg_ext()
1161  * amdgpu_device_indirect_wreg64 - write a 64-bit indirect register
1175 pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev); in amdgpu_device_indirect_wreg64()
1176 pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev); in amdgpu_device_indirect_wreg64()
1178 spin_lock_irqsave(&adev->pcie_idx_lock, flags); in amdgpu_device_indirect_wreg64()
1179 pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4; in amdgpu_device_indirect_wreg64()
1180 pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4; in amdgpu_device_indirect_wreg64()
1192 spin_unlock_irqrestore(&adev->pcie_idx_lock, flags); in amdgpu_device_indirect_wreg64()
1204 pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev); in amdgpu_device_indirect_wreg64_ext()
1205 pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev); in amdgpu_device_indirect_wreg64_ext()
1206 if ((reg_addr >> 32) && (adev->nbio.funcs->get_pcie_index_hi_offset)) in amdgpu_device_indirect_wreg64_ext()
1207 pcie_index_hi = adev->nbio.funcs->get_pcie_index_hi_offset(adev); in amdgpu_device_indirect_wreg64_ext()
1209 spin_lock_irqsave(&adev->pcie_idx_lock, flags); in amdgpu_device_indirect_wreg64_ext()
1210 pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4; in amdgpu_device_indirect_wreg64_ext()
1211 pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4; in amdgpu_device_indirect_wreg64_ext()
1213 pcie_index_hi_offset = (void __iomem *)adev->rmmio + in amdgpu_device_indirect_wreg64_ext()
1241 spin_unlock_irqrestore(&adev->pcie_idx_lock, flags); in amdgpu_device_indirect_wreg64_ext()
1245 * amdgpu_device_get_rev_id - query device rev_id
1253 return adev->nbio.funcs->get_rev_id(adev); in amdgpu_device_get_rev_id()
1257 * amdgpu_invalid_rreg - dummy reg read function
1281 * amdgpu_invalid_wreg - dummy reg write function
1305 * amdgpu_invalid_rreg64 - dummy 64 bit reg read function
1329  * amdgpu_invalid_wreg64 - dummy 64 bit reg write function
1353 * amdgpu_block_invalid_rreg - dummy reg read function
1373 * amdgpu_block_invalid_wreg - dummy reg write function
1393 * amdgpu_device_asic_init - Wrapper for atom asic_init
1413 return amdgpu_atom_asic_init(adev->mode_info.atom_context); in amdgpu_device_asic_init()
1420 * amdgpu_device_mem_scratch_init - allocate the VRAM scratch page
1432 &adev->mem_scratch.robj, in amdgpu_device_mem_scratch_init()
1433 &adev->mem_scratch.gpu_addr, in amdgpu_device_mem_scratch_init()
1434 (void **)&adev->mem_scratch.ptr); in amdgpu_device_mem_scratch_init()
1438 * amdgpu_device_mem_scratch_fini - Free the VRAM scratch page
1446 amdgpu_bo_free_kernel(&adev->mem_scratch.robj, NULL, NULL); in amdgpu_device_mem_scratch_fini()
1450 * amdgpu_device_program_register_sequence - program an array of registers.
1479 if (adev->family >= AMDGPU_FAMILY_AI) in amdgpu_device_program_register_sequence()
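The registers array walked here is a flat list of (reg, and_mask, or_mask) triples; an all-ones AND mask means a plain write, anything else a read-modify-write. A sketch of the per-entry logic under that assumption:

	if (and_mask == 0xffffffff) {
		tmp = or_mask;			/* full overwrite */
	} else {
		tmp = RREG32(reg);
		tmp &= ~and_mask;		/* clear the field */
		tmp |= (or_mask & and_mask);	/* set the new value */
	}
	WREG32(reg, tmp);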
1489 * amdgpu_device_pci_config_reset - reset the GPU
1498 pci_write_config_dword(adev->pdev, 0x7c, AMDGPU_ASIC_RESET_DATA); in amdgpu_device_pci_config_reset()
1502 * amdgpu_device_pci_reset - reset the GPU using generic PCI means
1510 return pci_reset_function(adev->pdev); in amdgpu_device_pci_reset()
1520 * amdgpu_device_wb_fini - Disable Writeback and free memory
1529 if (adev->wb.wb_obj) { in amdgpu_device_wb_fini()
1530 amdgpu_bo_free_kernel(&adev->wb.wb_obj, in amdgpu_device_wb_fini()
1531 &adev->wb.gpu_addr, in amdgpu_device_wb_fini()
1532 (void **)&adev->wb.wb); in amdgpu_device_wb_fini()
1533 adev->wb.wb_obj = NULL; in amdgpu_device_wb_fini()
1538 * amdgpu_device_wb_init - Init Writeback driver info and allocate memory
1544  * Returns 0 on success or a negative error code on failure.
1550 if (adev->wb.wb_obj == NULL) { in amdgpu_device_wb_init()
1554 &adev->wb.wb_obj, &adev->wb.gpu_addr, in amdgpu_device_wb_init()
1555 (void **)&adev->wb.wb); in amdgpu_device_wb_init()
1557 dev_warn(adev->dev, "(%d) create WB bo failed\n", r); in amdgpu_device_wb_init()
1561 adev->wb.num_wb = AMDGPU_MAX_WB; in amdgpu_device_wb_init()
1562 memset(&adev->wb.used, 0, sizeof(adev->wb.used)); in amdgpu_device_wb_init()
1565 memset((char *)adev->wb.wb, 0, AMDGPU_MAX_WB * sizeof(uint32_t) * 8); in amdgpu_device_wb_init()
1572 * amdgpu_device_wb_get - Allocate a wb entry
1578 * Returns 0 on success or -EINVAL on failure.
1584 spin_lock_irqsave(&adev->wb.lock, flags); in amdgpu_device_wb_get()
1585 offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb); in amdgpu_device_wb_get()
1586 if (offset < adev->wb.num_wb) { in amdgpu_device_wb_get()
1587 __set_bit(offset, adev->wb.used); in amdgpu_device_wb_get()
1588 spin_unlock_irqrestore(&adev->wb.lock, flags); in amdgpu_device_wb_get()
1592 spin_unlock_irqrestore(&adev->wb.lock, flags); in amdgpu_device_wb_get()
1593 return -EINVAL; in amdgpu_device_wb_get()
1598 * amdgpu_device_wb_free - Free a wb entry
1610 spin_lock_irqsave(&adev->wb.lock, flags); in amdgpu_device_wb_free()
1611 if (wb < adev->wb.num_wb) in amdgpu_device_wb_free()
1612 __clear_bit(wb, adev->wb.used); in amdgpu_device_wb_free()
1613 spin_unlock_irqrestore(&adev->wb.lock, flags); in amdgpu_device_wb_free()
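A hedged usage sketch for the allocator pair above: a caller reserves a slot, derives the GPU- and CPU-visible addresses for it, and frees it when done. The 4-byte stride is an assumption based on the uint32_t-sized wb array:

	u32 wb;
	u64 gpu_addr;
	volatile u32 *cpu_ptr;
	int r;

	r = amdgpu_device_wb_get(adev, &wb);
	if (r)
		return r;

	gpu_addr = adev->wb.gpu_addr + (wb * 4);	/* GPU-visible address of the slot */
	cpu_ptr = &adev->wb.wb[wb];			/* CPU-visible mirror */

	/* ... point a ring rptr/wptr or a fence at the slot ... */

	amdgpu_device_wb_free(adev, wb);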
1617 * amdgpu_device_resize_fb_bar - try to resize FB BAR
1623 * driver loading by returning -ENODEV.
1627 int rbar_size = pci_rebar_bytes_to_size(adev->gmc.real_vram_size); in amdgpu_device_resize_fb_bar()
1643 adev->pdev->vendor == PCI_VENDOR_ID_ATI && in amdgpu_device_resize_fb_bar()
1644 adev->pdev->device == 0x731f && in amdgpu_device_resize_fb_bar()
1645 adev->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL) in amdgpu_device_resize_fb_bar()
1649 if (!pci_find_ext_capability(adev->pdev, PCI_EXT_CAP_ID_VNDR)) in amdgpu_device_resize_fb_bar()
1653 if (adev->gmc.real_vram_size && in amdgpu_device_resize_fb_bar()
1654 (pci_resource_len(adev->pdev, 0) >= adev->gmc.real_vram_size)) in amdgpu_device_resize_fb_bar()
1658 root = adev->pdev->bus; in amdgpu_device_resize_fb_bar()
1659 while (root->parent) in amdgpu_device_resize_fb_bar()
1660 root = root->parent; in amdgpu_device_resize_fb_bar()
1663 if (res && res->flags & (IORESOURCE_MEM | IORESOURCE_MEM_64) && in amdgpu_device_resize_fb_bar()
1664 res->start > 0x100000000ull) in amdgpu_device_resize_fb_bar()
1673 rbar_size = min(fls(pci_rebar_get_possible_sizes(adev->pdev, 0)) - 1, in amdgpu_device_resize_fb_bar()
1677 pci_read_config_word(adev->pdev, PCI_COMMAND, &cmd); in amdgpu_device_resize_fb_bar()
1678 pci_write_config_word(adev->pdev, PCI_COMMAND, in amdgpu_device_resize_fb_bar()
1683 if (adev->asic_type >= CHIP_BONAIRE) in amdgpu_device_resize_fb_bar()
1684 pci_release_resource(adev->pdev, 2); in amdgpu_device_resize_fb_bar()
1686 pci_release_resource(adev->pdev, 0); in amdgpu_device_resize_fb_bar()
1688 r = pci_resize_resource(adev->pdev, 0, rbar_size); in amdgpu_device_resize_fb_bar()
1689 if (r == -ENOSPC) in amdgpu_device_resize_fb_bar()
1691 else if (r && r != -ENOTSUPP) in amdgpu_device_resize_fb_bar()
1694 pci_assign_unassigned_bus_resources(adev->pdev->bus); in amdgpu_device_resize_fb_bar()
1700 if (r || (pci_resource_flags(adev->pdev, 0) & IORESOURCE_UNSET)) in amdgpu_device_resize_fb_bar()
1701 return -ENODEV; in amdgpu_device_resize_fb_bar()
1703 pci_write_config_word(adev->pdev, PCI_COMMAND, cmd); in amdgpu_device_resize_fb_bar()
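The size arithmetic above relies on the PCI resizable-BAR encoding, where size = log2(bytes) - 20 (0 = 1 MiB, each step doubles). An illustrative recomputation, clamping the wanted size to the largest the BAR advertises:

	int wanted = pci_rebar_bytes_to_size(adev->gmc.real_vram_size);
	int largest = fls(pci_rebar_get_possible_sizes(adev->pdev, 0)) - 1;
	int rbar_size = min(wanted, largest);	/* encoded size, not bytes */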
1710 if (hweight32(adev->aid_mask) && (adev->flags & AMD_IS_APU)) in amdgpu_device_read_bios()
1720  * amdgpu_device_need_post - check whether the hw needs to be posted
1739 /* for FIJI: in the whole-GPU pass-through virtualization case, after VM reboot in amdgpu_device_need_post()
1744 if (adev->asic_type == CHIP_FIJI) { in amdgpu_device_need_post()
1748 err = request_firmware(&adev->pm.fw, "amdgpu/fiji_smc.bin", adev->dev); in amdgpu_device_need_post()
1753 fw_ver = *((uint32_t *)adev->pm.fw->data + 69); in amdgpu_device_need_post()
1754 release_firmware(adev->pm.fw); in amdgpu_device_need_post()
1761 if (adev->init_lvl->level == AMDGPU_INIT_LEVEL_MINIMAL_XGMI) in amdgpu_device_need_post()
1764 if (adev->has_hw_reset) { in amdgpu_device_need_post()
1765 adev->has_hw_reset = false; in amdgpu_device_need_post()
1770 if (adev->asic_type >= CHIP_BONAIRE) in amdgpu_device_need_post()
1792 case -1: in amdgpu_device_seamless_boot_supported()
1804 if (!(adev->flags & AMD_IS_APU)) in amdgpu_device_seamless_boot_supported()
1807 if (adev->mman.keep_stolen_vga_memory) in amdgpu_device_seamless_boot_supported()
1818 …gn/products/platforms/details/raptor-lake-s/13th-generation-core-processors-datasheet-volume-1-of-
1819 * https://gitlab.freedesktop.org/drm/amd/-/issues/2663
1827 if (dev_is_removable(adev->dev)) in amdgpu_device_pcie_dynamic_switching_supported()
1830 if (c->x86_vendor == X86_VENDOR_INTEL) in amdgpu_device_pcie_dynamic_switching_supported()
1837 * amdgpu_device_should_use_aspm - check if the device should program ASPM
1849 case -1: in amdgpu_device_should_use_aspm()
1858 if (adev->flags & AMD_IS_APU) in amdgpu_device_should_use_aspm()
1860 if (!(adev->pm.pp_feature & PP_PCIE_DPM_MASK)) in amdgpu_device_should_use_aspm()
1862 return pcie_aspm_enabled(adev->pdev); in amdgpu_device_should_use_aspm()
1867 * amdgpu_device_vga_set_decode - enable/disable vga decode
1889 * amdgpu_device_check_block_size - validate the vm block size
1904 if (amdgpu_vm_block_size == -1) in amdgpu_device_check_block_size()
1908 dev_warn(adev->dev, "VM page table size (%d) too small\n", in amdgpu_device_check_block_size()
1910 amdgpu_vm_block_size = -1; in amdgpu_device_check_block_size()
1915 * amdgpu_device_check_vm_size - validate the vm size
1925 if (amdgpu_vm_size == -1) in amdgpu_device_check_vm_size()
1929 dev_warn(adev->dev, "VM size (%d) too small, min is 1GB\n", in amdgpu_device_check_vm_size()
1931 amdgpu_vm_size = -1; in amdgpu_device_check_vm_size()
1947 DRM_WARN("Not 64-bit OS, feature not supported\n"); in amdgpu_device_check_smu_prv_buffer_size()
1965 adev->pm.smu_prv_buffer_size = amdgpu_smu_memory_pool_size << 28; in amdgpu_device_check_smu_prv_buffer_size()
1972 adev->pm.smu_prv_buffer_size = 0; in amdgpu_device_check_smu_prv_buffer_size()
1977 if (!(adev->flags & AMD_IS_APU) || in amdgpu_device_init_apu_flags()
1978 adev->asic_type < CHIP_RAVEN) in amdgpu_device_init_apu_flags()
1981 switch (adev->asic_type) { in amdgpu_device_init_apu_flags()
1983 if (adev->pdev->device == 0x15dd) in amdgpu_device_init_apu_flags()
1984 adev->apu_flags |= AMD_APU_IS_RAVEN; in amdgpu_device_init_apu_flags()
1985 if (adev->pdev->device == 0x15d8) in amdgpu_device_init_apu_flags()
1986 adev->apu_flags |= AMD_APU_IS_PICASSO; in amdgpu_device_init_apu_flags()
1989 if ((adev->pdev->device == 0x1636) || in amdgpu_device_init_apu_flags()
1990 (adev->pdev->device == 0x164c)) in amdgpu_device_init_apu_flags()
1991 adev->apu_flags |= AMD_APU_IS_RENOIR; in amdgpu_device_init_apu_flags()
1993 adev->apu_flags |= AMD_APU_IS_GREEN_SARDINE; in amdgpu_device_init_apu_flags()
1996 adev->apu_flags |= AMD_APU_IS_VANGOGH; in amdgpu_device_init_apu_flags()
2001 if ((adev->pdev->device == 0x13FE) || in amdgpu_device_init_apu_flags()
2002 (adev->pdev->device == 0x143F)) in amdgpu_device_init_apu_flags()
2003 adev->apu_flags |= AMD_APU_IS_CYAN_SKILLFISH2; in amdgpu_device_init_apu_flags()
2013 * amdgpu_device_check_arguments - validate module params
2025 dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n", in amdgpu_device_check_arguments()
2029 dev_warn(adev->dev, "sched jobs (%d) must be a power of 2\n", in amdgpu_device_check_arguments()
2034 if (amdgpu_gart_size != -1 && amdgpu_gart_size < 32) { in amdgpu_device_check_arguments()
2036 dev_warn(adev->dev, "gart size (%d) too small\n", in amdgpu_device_check_arguments()
2038 amdgpu_gart_size = -1; in amdgpu_device_check_arguments()
2041 if (amdgpu_gtt_size != -1 && amdgpu_gtt_size < 32) { in amdgpu_device_check_arguments()
2043 dev_warn(adev->dev, "gtt size (%d) too small\n", in amdgpu_device_check_arguments()
2045 amdgpu_gtt_size = -1; in amdgpu_device_check_arguments()
2049 if (amdgpu_vm_fragment_size != -1 && in amdgpu_device_check_arguments()
2051 dev_warn(adev->dev, "vm fragment size valid range is between 4 and 9\n"); in amdgpu_device_check_arguments()
2052 amdgpu_vm_fragment_size = -1; in amdgpu_device_check_arguments()
2056 dev_warn(adev->dev, "sched hw submission jobs (%d) must be at least 2\n", in amdgpu_device_check_arguments()
2060 dev_warn(adev->dev, "sched hw submission jobs (%d) must be a power of 2\n", in amdgpu_device_check_arguments()
2065 if (amdgpu_reset_method < -1 || amdgpu_reset_method > 4) { in amdgpu_device_check_arguments()
2066 dev_warn(adev->dev, "invalid option for reset method, reverting to default\n"); in amdgpu_device_check_arguments()
2067 amdgpu_reset_method = -1; in amdgpu_device_check_arguments()
2076 adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type); in amdgpu_device_check_arguments()
2079 adev->enforce_isolation[i] = !!enforce_isolation; in amdgpu_device_check_arguments()
2085 * amdgpu_switcheroo_set_state - set switcheroo state
2105 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; in amdgpu_switcheroo_set_state()
2114 dev->switch_power_state = DRM_SWITCH_POWER_ON; in amdgpu_switcheroo_set_state()
2117 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; in amdgpu_switcheroo_set_state()
2124 dev->switch_power_state = DRM_SWITCH_POWER_OFF; in amdgpu_switcheroo_set_state()
2129 * amdgpu_switcheroo_can_switch - see if switcheroo state can change
2146 return atomic_read(&dev->open_count) == 0; in amdgpu_switcheroo_can_switch()
2156 * amdgpu_device_ip_set_clockgating_state - set the CG state
2159 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
2173 for (i = 0; i < adev->num_ip_blocks; i++) { in amdgpu_device_ip_set_clockgating_state()
2174 if (!adev->ip_blocks[i].status.valid) in amdgpu_device_ip_set_clockgating_state()
2176 if (adev->ip_blocks[i].version->type != block_type) in amdgpu_device_ip_set_clockgating_state()
2178 if (!adev->ip_blocks[i].version->funcs->set_clockgating_state) in amdgpu_device_ip_set_clockgating_state()
2180 r = adev->ip_blocks[i].version->funcs->set_clockgating_state( in amdgpu_device_ip_set_clockgating_state()
2181 &adev->ip_blocks[i], state); in amdgpu_device_ip_set_clockgating_state()
2184 adev->ip_blocks[i].version->funcs->name, r); in amdgpu_device_ip_set_clockgating_state()
2190 * amdgpu_device_ip_set_powergating_state - set the PG state
2193 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
2207 for (i = 0; i < adev->num_ip_blocks; i++) { in amdgpu_device_ip_set_powergating_state()
2208 if (!adev->ip_blocks[i].status.valid) in amdgpu_device_ip_set_powergating_state()
2210 if (adev->ip_blocks[i].version->type != block_type) in amdgpu_device_ip_set_powergating_state()
2212 if (!adev->ip_blocks[i].version->funcs->set_powergating_state) in amdgpu_device_ip_set_powergating_state()
2214 r = adev->ip_blocks[i].version->funcs->set_powergating_state( in amdgpu_device_ip_set_powergating_state()
2215 &adev->ip_blocks[i], state); in amdgpu_device_ip_set_powergating_state()
2218 adev->ip_blocks[i].version->funcs->name, r); in amdgpu_device_ip_set_powergating_state()
2224 * amdgpu_device_ip_get_clockgating_state - get the CG state
2239 for (i = 0; i < adev->num_ip_blocks; i++) { in amdgpu_device_ip_get_clockgating_state()
2240 if (!adev->ip_blocks[i].status.valid) in amdgpu_device_ip_get_clockgating_state()
2242 if (adev->ip_blocks[i].version->funcs->get_clockgating_state) in amdgpu_device_ip_get_clockgating_state()
2243 adev->ip_blocks[i].version->funcs->get_clockgating_state((void *)adev, flags); in amdgpu_device_ip_get_clockgating_state()
2248 * amdgpu_device_ip_wait_for_idle - wait for idle
2251 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
2261 for (i = 0; i < adev->num_ip_blocks; i++) { in amdgpu_device_ip_wait_for_idle()
2262 if (!adev->ip_blocks[i].status.valid) in amdgpu_device_ip_wait_for_idle()
2264 if (adev->ip_blocks[i].version->type == block_type) { in amdgpu_device_ip_wait_for_idle()
2265 if (adev->ip_blocks[i].version->funcs->wait_for_idle) { in amdgpu_device_ip_wait_for_idle()
2266 r = adev->ip_blocks[i].version->funcs->wait_for_idle( in amdgpu_device_ip_wait_for_idle()
2267 &adev->ip_blocks[i]); in amdgpu_device_ip_wait_for_idle()
2279 * amdgpu_device_ip_is_valid - is the hardware IP enabled
2282 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
2292 for (i = 0; i < adev->num_ip_blocks; i++) { in amdgpu_device_ip_is_valid()
2293 if (adev->ip_blocks[i].version->type == block_type) in amdgpu_device_ip_is_valid()
2294 return adev->ip_blocks[i].status.valid; in amdgpu_device_ip_is_valid()
2301 * amdgpu_device_ip_get_ip_block - get a hw IP pointer
2304 * @type: Type of hardware IP (SMU, GFX, UVD, etc.)
2315 for (i = 0; i < adev->num_ip_blocks; i++) in amdgpu_device_ip_get_ip_block()
2316 if (adev->ip_blocks[i].version->type == type) in amdgpu_device_ip_get_ip_block()
2317 return &adev->ip_blocks[i]; in amdgpu_device_ip_get_ip_block()
2339 if (ip_block && ((ip_block->version->major > major) || in amdgpu_device_ip_block_version_cmp()
2340 ((ip_block->version->major == major) && in amdgpu_device_ip_block_version_cmp()
2341 (ip_block->version->minor >= minor)))) in amdgpu_device_ip_block_version_cmp()
2360 return -EINVAL; in amdgpu_device_ip_block_add()
2362 switch (ip_block_version->type) { in amdgpu_device_ip_block_add()
2364 if (adev->harvest_ip_mask & AMD_HARVEST_IP_VCN_MASK) in amdgpu_device_ip_block_add()
2368 if (adev->harvest_ip_mask & AMD_HARVEST_IP_JPEG_MASK) in amdgpu_device_ip_block_add()
2375 dev_info(adev->dev, "detected ip block number %d <%s>\n", in amdgpu_device_ip_block_add()
2376 adev->num_ip_blocks, ip_block_version->funcs->name); in amdgpu_device_ip_block_add()
2378 adev->ip_blocks[adev->num_ip_blocks].adev = adev; in amdgpu_device_ip_block_add()
2380 adev->ip_blocks[adev->num_ip_blocks++].version = ip_block_version; in amdgpu_device_ip_block_add()
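A hedged usage sketch: IP discovery code registers block versions in initialization order; the particular block identifier below is illustrative, not taken from this listing:

	r = amdgpu_device_ip_block_add(adev, &gfx_v11_0_ip_block);
	if (r)
		return r;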
2386 * amdgpu_device_enable_virtual_display - enable virtual display feature
2399 adev->enable_virtual_display = false; in amdgpu_device_enable_virtual_display()
2402 const char *pci_address_name = pci_name(adev->pdev); in amdgpu_device_enable_virtual_display()
2412 int res = -1; in amdgpu_device_enable_virtual_display()
2414 adev->enable_virtual_display = true; in amdgpu_device_enable_virtual_display()
2425 adev->mode_info.num_crtc = num_crtc; in amdgpu_device_enable_virtual_display()
2427 adev->mode_info.num_crtc = 1; in amdgpu_device_enable_virtual_display()
2435 adev->enable_virtual_display, adev->mode_info.num_crtc); in amdgpu_device_enable_virtual_display()
2443 if (amdgpu_sriov_vf(adev) && !adev->enable_virtual_display) { in amdgpu_device_set_sriov_virtual_display()
2444 adev->mode_info.num_crtc = 1; in amdgpu_device_set_sriov_virtual_display()
2445 adev->enable_virtual_display = true; in amdgpu_device_set_sriov_virtual_display()
2447 adev->enable_virtual_display, adev->mode_info.num_crtc); in amdgpu_device_set_sriov_virtual_display()
2452 * amdgpu_device_parse_gpu_info_fw - parse gpu info firmware
2459 * Returns 0 on success, -EINVAL on failure.
2467 adev->firmware.gpu_info_fw = NULL; in amdgpu_device_parse_gpu_info_fw()
2469 if (adev->mman.discovery_bin) in amdgpu_device_parse_gpu_info_fw()
2472 switch (adev->asic_type) { in amdgpu_device_parse_gpu_info_fw()
2482 if (adev->apu_flags & AMD_APU_IS_RAVEN2) in amdgpu_device_parse_gpu_info_fw()
2484 else if (adev->apu_flags & AMD_APU_IS_PICASSO) in amdgpu_device_parse_gpu_info_fw()
2497 err = amdgpu_ucode_request(adev, &adev->firmware.gpu_info_fw, in amdgpu_device_parse_gpu_info_fw()
2501 dev_err(adev->dev, in amdgpu_device_parse_gpu_info_fw()
2507 hdr = (const struct gpu_info_firmware_header_v1_0 *)adev->firmware.gpu_info_fw->data; in amdgpu_device_parse_gpu_info_fw()
2508 amdgpu_ucode_print_gpu_info_hdr(&hdr->header); in amdgpu_device_parse_gpu_info_fw()
2510 switch (hdr->version_major) { in amdgpu_device_parse_gpu_info_fw()
2514 (const struct gpu_info_firmware_v1_0 *)(adev->firmware.gpu_info_fw->data + in amdgpu_device_parse_gpu_info_fw()
2515 le32_to_cpu(hdr->header.ucode_array_offset_bytes)); in amdgpu_device_parse_gpu_info_fw()
2520 if (adev->asic_type == CHIP_NAVI12) in amdgpu_device_parse_gpu_info_fw()
2523 adev->gfx.config.max_shader_engines = le32_to_cpu(gpu_info_fw->gc_num_se); in amdgpu_device_parse_gpu_info_fw()
2524 adev->gfx.config.max_cu_per_sh = le32_to_cpu(gpu_info_fw->gc_num_cu_per_sh); in amdgpu_device_parse_gpu_info_fw()
2525 adev->gfx.config.max_sh_per_se = le32_to_cpu(gpu_info_fw->gc_num_sh_per_se); in amdgpu_device_parse_gpu_info_fw()
2526 adev->gfx.config.max_backends_per_se = le32_to_cpu(gpu_info_fw->gc_num_rb_per_se); in amdgpu_device_parse_gpu_info_fw()
2527 adev->gfx.config.max_texture_channel_caches = in amdgpu_device_parse_gpu_info_fw()
2528 le32_to_cpu(gpu_info_fw->gc_num_tccs); in amdgpu_device_parse_gpu_info_fw()
2529 adev->gfx.config.max_gprs = le32_to_cpu(gpu_info_fw->gc_num_gprs); in amdgpu_device_parse_gpu_info_fw()
2530 adev->gfx.config.max_gs_threads = le32_to_cpu(gpu_info_fw->gc_num_max_gs_thds); in amdgpu_device_parse_gpu_info_fw()
2531 adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gpu_info_fw->gc_gs_table_depth); in amdgpu_device_parse_gpu_info_fw()
2532 adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gpu_info_fw->gc_gsprim_buff_depth); in amdgpu_device_parse_gpu_info_fw()
2533 adev->gfx.config.double_offchip_lds_buf = in amdgpu_device_parse_gpu_info_fw()
2534 le32_to_cpu(gpu_info_fw->gc_double_offchip_lds_buffer); in amdgpu_device_parse_gpu_info_fw()
2535 adev->gfx.cu_info.wave_front_size = le32_to_cpu(gpu_info_fw->gc_wave_size); in amdgpu_device_parse_gpu_info_fw()
2536 adev->gfx.cu_info.max_waves_per_simd = in amdgpu_device_parse_gpu_info_fw()
2537 le32_to_cpu(gpu_info_fw->gc_max_waves_per_simd); in amdgpu_device_parse_gpu_info_fw()
2538 adev->gfx.cu_info.max_scratch_slots_per_cu = in amdgpu_device_parse_gpu_info_fw()
2539 le32_to_cpu(gpu_info_fw->gc_max_scratch_slots_per_cu); in amdgpu_device_parse_gpu_info_fw()
2540 adev->gfx.cu_info.lds_size = le32_to_cpu(gpu_info_fw->gc_lds_size); in amdgpu_device_parse_gpu_info_fw()
2541 if (hdr->version_minor >= 1) { in amdgpu_device_parse_gpu_info_fw()
2543 (const struct gpu_info_firmware_v1_1 *)(adev->firmware.gpu_info_fw->data + in amdgpu_device_parse_gpu_info_fw()
2544 le32_to_cpu(hdr->header.ucode_array_offset_bytes)); in amdgpu_device_parse_gpu_info_fw()
2545 adev->gfx.config.num_sc_per_sh = in amdgpu_device_parse_gpu_info_fw()
2546 le32_to_cpu(gpu_info_fw->num_sc_per_sh); in amdgpu_device_parse_gpu_info_fw()
2547 adev->gfx.config.num_packer_per_sc = in amdgpu_device_parse_gpu_info_fw()
2548 le32_to_cpu(gpu_info_fw->num_packer_per_sc); in amdgpu_device_parse_gpu_info_fw()
2556 if (hdr->version_minor == 2) { in amdgpu_device_parse_gpu_info_fw()
2558 (const struct gpu_info_firmware_v1_2 *)(adev->firmware.gpu_info_fw->data + in amdgpu_device_parse_gpu_info_fw()
2559 le32_to_cpu(hdr->header.ucode_array_offset_bytes)); in amdgpu_device_parse_gpu_info_fw()
2560 adev->dm.soc_bounding_box = &gpu_info_fw->soc_bounding_box; in amdgpu_device_parse_gpu_info_fw()
2565 dev_err(adev->dev, in amdgpu_device_parse_gpu_info_fw()
2566 "Unsupported gpu_info table %d\n", hdr->header.ucode_version); in amdgpu_device_parse_gpu_info_fw()
2567 err = -EINVAL; in amdgpu_device_parse_gpu_info_fw()
2575 * amdgpu_device_ip_early_init - run early init for hardware IPs
2599 switch (adev->asic_type) { in amdgpu_device_ip_early_init()
2606 adev->family = AMDGPU_FAMILY_SI; in amdgpu_device_ip_early_init()
2618 if (adev->flags & AMD_IS_APU) in amdgpu_device_ip_early_init()
2619 adev->family = AMDGPU_FAMILY_KV; in amdgpu_device_ip_early_init()
2621 adev->family = AMDGPU_FAMILY_CI; in amdgpu_device_ip_early_init()
2637 if (adev->flags & AMD_IS_APU) in amdgpu_device_ip_early_init()
2638 adev->family = AMDGPU_FAMILY_CZ; in amdgpu_device_ip_early_init()
2640 adev->family = AMDGPU_FAMILY_VI; in amdgpu_device_ip_early_init()
2656 ((adev->flags & AMD_IS_APU) == 0) && in amdgpu_device_ip_early_init()
2657 !dev_is_removable(&adev->pdev->dev)) in amdgpu_device_ip_early_init()
2658 adev->flags |= AMD_IS_PX; in amdgpu_device_ip_early_init()
2660 if (!(adev->flags & AMD_IS_APU)) { in amdgpu_device_ip_early_init()
2661 parent = pcie_find_root_port(adev->pdev); in amdgpu_device_ip_early_init()
2662 adev->has_pr3 = parent ? pci_pr3_present(parent) : false; in amdgpu_device_ip_early_init()
2666 adev->pm.pp_feature = amdgpu_pp_feature_mask; in amdgpu_device_ip_early_init()
2668 adev->pm.pp_feature &= ~PP_GFXOFF_MASK; in amdgpu_device_ip_early_init()
2669 if (amdgpu_sriov_vf(adev) && adev->asic_type == CHIP_SIENNA_CICHLID) in amdgpu_device_ip_early_init()
2670 adev->pm.pp_feature &= ~PP_OVERDRIVE_MASK; in amdgpu_device_ip_early_init()
2672 adev->pm.pp_feature &= ~PP_PCIE_DPM_MASK; in amdgpu_device_ip_early_init()
2675 for (i = 0; i < adev->num_ip_blocks; i++) { in amdgpu_device_ip_early_init()
2676 ip_block = &adev->ip_blocks[i]; in amdgpu_device_ip_early_init()
2680 i, adev->ip_blocks[i].version->funcs->name); in amdgpu_device_ip_early_init()
2681 adev->ip_blocks[i].status.valid = false; in amdgpu_device_ip_early_init()
2682 } else if (ip_block->version->funcs->early_init) { in amdgpu_device_ip_early_init()
2683 r = ip_block->version->funcs->early_init(ip_block); in amdgpu_device_ip_early_init()
2684 if (r == -ENOENT) { in amdgpu_device_ip_early_init()
2685 adev->ip_blocks[i].status.valid = false; in amdgpu_device_ip_early_init()
2688 adev->ip_blocks[i].version->funcs->name, r); in amdgpu_device_ip_early_init()
2691 adev->ip_blocks[i].status.valid = true; in amdgpu_device_ip_early_init()
2694 adev->ip_blocks[i].status.valid = true; in amdgpu_device_ip_early_init()
2697 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) { in amdgpu_device_ip_early_init()
2705 return -EINVAL; in amdgpu_device_ip_early_init()
2709 dev_err(adev->dev, "amdgpu_atombios_init failed\n"); in amdgpu_device_ip_early_init()
2722 return -ENODEV; in amdgpu_device_ip_early_init()
2725 if (ip_block->status.valid) in amdgpu_device_ip_early_init()
2728 adev->cg_flags &= amdgpu_cg_mask; in amdgpu_device_ip_early_init()
2729 adev->pg_flags &= amdgpu_pg_mask; in amdgpu_device_ip_early_init()
2738 for (i = 0; i < adev->num_ip_blocks; i++) { in amdgpu_device_ip_hw_init_phase1()
2739 if (!adev->ip_blocks[i].status.sw) in amdgpu_device_ip_hw_init_phase1()
2741 if (adev->ip_blocks[i].status.hw) in amdgpu_device_ip_hw_init_phase1()
2744 adev, adev->ip_blocks[i].version->type)) in amdgpu_device_ip_hw_init_phase1()
2746 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON || in amdgpu_device_ip_hw_init_phase1()
2747 (amdgpu_sriov_vf(adev) && (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)) || in amdgpu_device_ip_hw_init_phase1()
2748 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) { in amdgpu_device_ip_hw_init_phase1()
2749 r = adev->ip_blocks[i].version->funcs->hw_init(&adev->ip_blocks[i]); in amdgpu_device_ip_hw_init_phase1()
2752 adev->ip_blocks[i].version->funcs->name, r); in amdgpu_device_ip_hw_init_phase1()
2755 adev->ip_blocks[i].status.hw = true; in amdgpu_device_ip_hw_init_phase1()
2766 for (i = 0; i < adev->num_ip_blocks; i++) { in amdgpu_device_ip_hw_init_phase2()
2767 if (!adev->ip_blocks[i].status.sw) in amdgpu_device_ip_hw_init_phase2()
2769 if (adev->ip_blocks[i].status.hw) in amdgpu_device_ip_hw_init_phase2()
2772 adev, adev->ip_blocks[i].version->type)) in amdgpu_device_ip_hw_init_phase2()
2774 r = adev->ip_blocks[i].version->funcs->hw_init(&adev->ip_blocks[i]); in amdgpu_device_ip_hw_init_phase2()
2777 adev->ip_blocks[i].version->funcs->name, r); in amdgpu_device_ip_hw_init_phase2()
2780 adev->ip_blocks[i].status.hw = true; in amdgpu_device_ip_hw_init_phase2()
2792 if (adev->asic_type >= CHIP_VEGA10) { in amdgpu_device_fw_loading()
2793 for (i = 0; i < adev->num_ip_blocks; i++) { in amdgpu_device_fw_loading()
2794 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_PSP) in amdgpu_device_fw_loading()
2801 if (!adev->ip_blocks[i].status.sw) in amdgpu_device_fw_loading()
2805 if (adev->ip_blocks[i].status.hw) in amdgpu_device_fw_loading()
2808 if (amdgpu_in_reset(adev) || adev->in_suspend) { in amdgpu_device_fw_loading()
2809 r = amdgpu_ip_block_resume(&adev->ip_blocks[i]); in amdgpu_device_fw_loading()
2813 r = adev->ip_blocks[i].version->funcs->hw_init(&adev->ip_blocks[i]); in amdgpu_device_fw_loading()
2816 adev->ip_blocks[i].version->funcs->name, r); in amdgpu_device_fw_loading()
2819 adev->ip_blocks[i].status.hw = true; in amdgpu_device_fw_loading()
2825 if (!amdgpu_sriov_vf(adev) || adev->asic_type == CHIP_TONGA) in amdgpu_device_fw_loading()
2837 struct amdgpu_ring *ring = adev->rings[i]; in amdgpu_device_init_schedulers()
2840 if (!ring || ring->no_scheduler) in amdgpu_device_init_schedulers()
2843 switch (ring->funcs->type) { in amdgpu_device_init_schedulers()
2845 timeout = adev->gfx_timeout; in amdgpu_device_init_schedulers()
2848 timeout = adev->compute_timeout; in amdgpu_device_init_schedulers()
2851 timeout = adev->sdma_timeout; in amdgpu_device_init_schedulers()
2854 timeout = adev->video_timeout; in amdgpu_device_init_schedulers()
2858 r = drm_sched_init(&ring->sched, &amdgpu_sched_ops, NULL, in amdgpu_device_init_schedulers()
2860 ring->num_hw_submission, 0, in amdgpu_device_init_schedulers()
2861 timeout, adev->reset_domain->wq, in amdgpu_device_init_schedulers()
2862 ring->sched_score, ring->name, in amdgpu_device_init_schedulers()
2863 adev->dev); in amdgpu_device_init_schedulers()
2866 ring->name); in amdgpu_device_init_schedulers()
2872 ring->name); in amdgpu_device_init_schedulers()
2878 ring->name); in amdgpu_device_init_schedulers()
2890 * amdgpu_device_ip_init - run init for hardware IPs
2909 for (i = 0; i < adev->num_ip_blocks; i++) { in amdgpu_device_ip_init()
2910 if (!adev->ip_blocks[i].status.valid) in amdgpu_device_ip_init()
2912 if (adev->ip_blocks[i].version->funcs->sw_init) { in amdgpu_device_ip_init()
2913 r = adev->ip_blocks[i].version->funcs->sw_init(&adev->ip_blocks[i]); in amdgpu_device_ip_init()
2916 adev->ip_blocks[i].version->funcs->name, r); in amdgpu_device_ip_init()
2920 adev->ip_blocks[i].status.sw = true; in amdgpu_device_ip_init()
2923 adev, adev->ip_blocks[i].version->type)) in amdgpu_device_ip_init()
2926 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) { in amdgpu_device_ip_init()
2928 r = adev->ip_blocks[i].version->funcs->hw_init(&adev->ip_blocks[i]); in amdgpu_device_ip_init()
2933 adev->ip_blocks[i].status.hw = true; in amdgpu_device_ip_init()
2934 } else if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) { in amdgpu_device_ip_init()
2935 /* need to do gmc hw init early so we can allocate gpu mem */ in amdgpu_device_ip_init()
2945 r = adev->ip_blocks[i].version->funcs->hw_init(&adev->ip_blocks[i]); in amdgpu_device_ip_init()
2955 adev->ip_blocks[i].status.hw = true; in amdgpu_device_ip_init()
2958 if (adev->gfx.mcbp) { in amdgpu_device_ip_init()
2959 r = amdgpu_allocate_static_csa(adev, &adev->virt.csa_obj, in amdgpu_device_ip_init()
2982 dev_err(adev->dev, "IB initialization failed (%d).\n", r); in amdgpu_device_ip_init()
3018 init_badpage = (adev->init_lvl->level != AMDGPU_INIT_LEVEL_MINIMAL_XGMI); in amdgpu_device_ip_init()
3026 if (adev->gmc.xgmi.num_physical_nodes > 1) { in amdgpu_device_ip_init()
3032 r = -ENOENT; in amdgpu_device_ip_init()
3036 if (!hive->reset_domain || in amdgpu_device_ip_init()
3037 !amdgpu_reset_get_reset_domain(hive->reset_domain)) { in amdgpu_device_ip_init()
3038 r = -ENOENT; in amdgpu_device_ip_init()
3044 amdgpu_reset_put_reset_domain(adev->reset_domain); in amdgpu_device_ip_init()
3045 adev->reset_domain = hive->reset_domain; in amdgpu_device_ip_init()
3055 if (adev->mman.buffer_funcs_ring->sched.ready) in amdgpu_device_ip_init()
3059 if (adev->init_lvl->level != AMDGPU_INIT_LEVEL_MINIMAL_XGMI) { in amdgpu_device_ip_init()
3072  * amdgpu_device_fill_reset_magic - save the reset magic from the gart pointer
3082 memcpy(adev->reset_magic, adev->gart.ptr, AMDGPU_RESET_MAGIC_NUM); in amdgpu_device_fill_reset_magic()
3086 * amdgpu_device_check_vram_lost - check if vram is valid
3097 if (memcmp(adev->gart.ptr, adev->reset_magic, in amdgpu_device_check_vram_lost()
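The comparison is the whole check: the magic bytes were saved out of the GART table (which lives in VRAM) before reset, so a mismatch afterwards means VRAM did not survive. A hedged completion of the truncated call:

	if (memcmp(adev->gart.ptr, adev->reset_magic,
		   AMDGPU_RESET_MAGIC_NUM))
		return true;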
3118 * amdgpu_device_set_cg_state - set clockgating for amdgpu device
3138 for (j = 0; j < adev->num_ip_blocks; j++) { in amdgpu_device_set_cg_state()
3139 i = state == AMD_CG_STATE_GATE ? j : adev->num_ip_blocks - j - 1; in amdgpu_device_set_cg_state()
3140 if (!adev->ip_blocks[i].status.late_initialized) in amdgpu_device_set_cg_state()
3142 /* skip CG for GFX, SDMA on S0ix */ in amdgpu_device_set_cg_state()
3143 if (adev->in_s0ix && in amdgpu_device_set_cg_state()
3144 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX || in amdgpu_device_set_cg_state()
3145 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SDMA)) in amdgpu_device_set_cg_state()
3148 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD && in amdgpu_device_set_cg_state()
3149 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE && in amdgpu_device_set_cg_state()
3150 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN && in amdgpu_device_set_cg_state()
3151 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG && in amdgpu_device_set_cg_state()
3152 adev->ip_blocks[i].version->funcs->set_clockgating_state) { in amdgpu_device_set_cg_state()
3154 r = adev->ip_blocks[i].version->funcs->set_clockgating_state(&adev->ip_blocks[i], in amdgpu_device_set_cg_state()
3158 adev->ip_blocks[i].version->funcs->name, r); in amdgpu_device_set_cg_state()
3175 for (j = 0; j < adev->num_ip_blocks; j++) { in amdgpu_device_set_pg_state()
3176 i = state == AMD_PG_STATE_GATE ? j : adev->num_ip_blocks - j - 1; in amdgpu_device_set_pg_state()
3177 if (!adev->ip_blocks[i].status.late_initialized) in amdgpu_device_set_pg_state()
3179 /* skip PG for GFX, SDMA on S0ix */ in amdgpu_device_set_pg_state()
3180 if (adev->in_s0ix && in amdgpu_device_set_pg_state()
3181 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX || in amdgpu_device_set_pg_state()
3182 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SDMA)) in amdgpu_device_set_pg_state()
3185 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD && in amdgpu_device_set_pg_state()
3186 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE && in amdgpu_device_set_pg_state()
3187 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN && in amdgpu_device_set_pg_state()
3188 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG && in amdgpu_device_set_pg_state()
3189 adev->ip_blocks[i].version->funcs->set_powergating_state) { in amdgpu_device_set_pg_state()
3191 r = adev->ip_blocks[i].version->funcs->set_powergating_state(&adev->ip_blocks[i], in amdgpu_device_set_pg_state()
3195 adev->ip_blocks[i].version->funcs->name, r); in amdgpu_device_set_pg_state()
3221 adev = gpu_ins->adev; in amdgpu_device_enable_mgpu_fan_boost()
3222 if (!(adev->flags & AMD_IS_APU) && in amdgpu_device_enable_mgpu_fan_boost()
3223 !gpu_ins->mgpu_fan_enabled) { in amdgpu_device_enable_mgpu_fan_boost()
3228 gpu_ins->mgpu_fan_enabled = 1; in amdgpu_device_enable_mgpu_fan_boost()
3239 * amdgpu_device_ip_late_init - run late init for hardware IPs
3255 for (i = 0; i < adev->num_ip_blocks; i++) { in amdgpu_device_ip_late_init()
3256 if (!adev->ip_blocks[i].status.hw) in amdgpu_device_ip_late_init()
3258 if (adev->ip_blocks[i].version->funcs->late_init) { in amdgpu_device_ip_late_init()
3259 r = adev->ip_blocks[i].version->funcs->late_init(&adev->ip_blocks[i]); in amdgpu_device_ip_late_init()
3262 adev->ip_blocks[i].version->funcs->name, r); in amdgpu_device_ip_late_init()
3266 adev->ip_blocks[i].status.late_initialized = true; in amdgpu_device_ip_late_init()
3289 ((adev->asic_type == CHIP_ARCTURUS && adev->gmc.xgmi.num_physical_nodes > 1) || in amdgpu_device_ip_late_init()
3290 adev->asic_type == CHIP_ALDEBARAN)) in amdgpu_device_ip_late_init()
3293 if (adev->gmc.xgmi.num_physical_nodes > 1) { in amdgpu_device_ip_late_init()
3297 * Reset device p-state to low as this was booted with high. in amdgpu_device_ip_late_init()
3309 if (mgpu_info.num_dgpu == adev->gmc.xgmi.num_physical_nodes) { in amdgpu_device_ip_late_init()
3312 if (gpu_instance->adev->flags & AMD_IS_APU) in amdgpu_device_ip_late_init()
3315 r = amdgpu_xgmi_set_pstate(gpu_instance->adev, in amdgpu_device_ip_late_init()
3334 if (!ip_block->version->funcs->hw_fini) { in amdgpu_ip_block_hw_fini()
3336 ip_block->version->funcs->name); in amdgpu_ip_block_hw_fini()
3338 r = ip_block->version->funcs->hw_fini(ip_block); in amdgpu_ip_block_hw_fini()
3342 ip_block->version->funcs->name, r); in amdgpu_ip_block_hw_fini()
3346 ip_block->status.hw = false; in amdgpu_ip_block_hw_fini()
3350 * amdgpu_device_smu_fini_early - smu hw_fini wrapper
3363 for (i = 0; i < adev->num_ip_blocks; i++) { in amdgpu_device_smu_fini_early()
3364 if (!adev->ip_blocks[i].status.hw) in amdgpu_device_smu_fini_early()
3366 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) { in amdgpu_device_smu_fini_early()
3367 amdgpu_ip_block_hw_fini(&adev->ip_blocks[i]); in amdgpu_device_smu_fini_early()
3377 for (i = 0; i < adev->num_ip_blocks; i++) { in amdgpu_device_ip_fini_early()
3378 if (!adev->ip_blocks[i].version->funcs->early_fini) in amdgpu_device_ip_fini_early()
3381 r = adev->ip_blocks[i].version->funcs->early_fini(&adev->ip_blocks[i]); in amdgpu_device_ip_fini_early()
3384 adev->ip_blocks[i].version->funcs->name, r); in amdgpu_device_ip_fini_early()
3396 for (i = adev->num_ip_blocks - 1; i >= 0; i--) { in amdgpu_device_ip_fini_early()
3397 if (!adev->ip_blocks[i].status.hw) in amdgpu_device_ip_fini_early()
3400 amdgpu_ip_block_hw_fini(&adev->ip_blocks[i]); in amdgpu_device_ip_fini_early()
3412 * amdgpu_device_ip_fini - run fini for hardware IPs
3426 if (amdgpu_sriov_vf(adev) && adev->virt.ras_init_done) in amdgpu_device_ip_fini()
3429 if (adev->gmc.xgmi.num_physical_nodes > 1) in amdgpu_device_ip_fini()
3434 for (i = adev->num_ip_blocks - 1; i >= 0; i--) { in amdgpu_device_ip_fini()
3435 if (!adev->ip_blocks[i].status.sw) in amdgpu_device_ip_fini()
3438 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) { in amdgpu_device_ip_fini()
3440 amdgpu_free_static_csa(&adev->virt.csa_obj); in amdgpu_device_ip_fini()
3447 if (adev->ip_blocks[i].version->funcs->sw_fini) { in amdgpu_device_ip_fini()
3448 r = adev->ip_blocks[i].version->funcs->sw_fini(&adev->ip_blocks[i]); in amdgpu_device_ip_fini()
3452 adev->ip_blocks[i].version->funcs->name, r); in amdgpu_device_ip_fini()
3455 adev->ip_blocks[i].status.sw = false; in amdgpu_device_ip_fini()
3456 adev->ip_blocks[i].status.valid = false; in amdgpu_device_ip_fini()
3459 for (i = adev->num_ip_blocks - 1; i >= 0; i--) { in amdgpu_device_ip_fini()
3460 if (!adev->ip_blocks[i].status.late_initialized) in amdgpu_device_ip_fini()
3462 if (adev->ip_blocks[i].version->funcs->late_fini) in amdgpu_device_ip_fini()
3463 adev->ip_blocks[i].version->funcs->late_fini(&adev->ip_blocks[i]); in amdgpu_device_ip_fini()
3464 adev->ip_blocks[i].status.late_initialized = false; in amdgpu_device_ip_fini()
3473 * amdgpu_device_delayed_init_work_handler - work handler for IB tests
3491 container_of(work, struct amdgpu_device, gfx.gfx_off_delay_work.work); in amdgpu_device_delay_enable_gfx_off()
3493 WARN_ON_ONCE(adev->gfx.gfx_off_state); in amdgpu_device_delay_enable_gfx_off()
3494 WARN_ON_ONCE(adev->gfx.gfx_off_req_count); in amdgpu_device_delay_enable_gfx_off()
3497 adev->gfx.gfx_off_state = true; in amdgpu_device_delay_enable_gfx_off()
3501 * amdgpu_device_ip_suspend_phase1 - run suspend for hardware IPs (phase 1)
3524 dev_warn(adev->dev, "Failed to disallow df cstate\n"); in amdgpu_device_ip_suspend_phase1()
3526 for (i = adev->num_ip_blocks - 1; i >= 0; i--) { in amdgpu_device_ip_suspend_phase1()
3527 if (!adev->ip_blocks[i].status.valid) in amdgpu_device_ip_suspend_phase1()
3531 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_DCE) in amdgpu_device_ip_suspend_phase1()
3535 r = amdgpu_ip_block_suspend(&adev->ip_blocks[i]); in amdgpu_device_ip_suspend_phase1()
3544 * amdgpu_device_ip_suspend_phase2 - run suspend for hardware IPs (phase 2)
3558 if (adev->in_s0ix) in amdgpu_device_ip_suspend_phase2()
3561 for (i = adev->num_ip_blocks - 1; i >= 0; i--) { in amdgpu_device_ip_suspend_phase2()
3562 if (!adev->ip_blocks[i].status.valid) in amdgpu_device_ip_suspend_phase2()
3565 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) in amdgpu_device_ip_suspend_phase2()
3569 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) { in amdgpu_device_ip_suspend_phase2()
3570 adev->ip_blocks[i].status.hw = false; in amdgpu_device_ip_suspend_phase2()
3576 adev, adev->ip_blocks[i].version->type)) in amdgpu_device_ip_suspend_phase2()
3579 /* skip suspend of gfx/mes and psp for S0ix in amdgpu_device_ip_suspend_phase2()
3580 * gfx is in gfxoff state, so on resume it will exit gfxoff just in amdgpu_device_ip_suspend_phase2()
3584 if (adev->in_s0ix && in amdgpu_device_ip_suspend_phase2()
3585 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP || in amdgpu_device_ip_suspend_phase2()
3586 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX || in amdgpu_device_ip_suspend_phase2()
3587 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_MES)) in amdgpu_device_ip_suspend_phase2()
3590 /* SDMA 5.x+ is part of GFX power domain so it's covered by GFXOFF */ in amdgpu_device_ip_suspend_phase2()
3591 if (adev->in_s0ix && in amdgpu_device_ip_suspend_phase2()
3594 (adev->ip_blocks[i].version->type == in amdgpu_device_ip_suspend_phase2()
3598 /* swPSP provides the IMU and RLC FW binaries to TOS during cold-boot. in amdgpu_device_ip_suspend_phase2()
3599 * These are in TMR, hence are expected to be reused by PSP-TOS to reload in amdgpu_device_ip_suspend_phase2()
3601 * from here based on PMFW -> PSP message during re-init sequence. in amdgpu_device_ip_suspend_phase2()
3606 (adev->flags & AMD_IS_APU) && adev->gfx.imu.funcs && in amdgpu_device_ip_suspend_phase2()
3607 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) in amdgpu_device_ip_suspend_phase2()
3611 r = amdgpu_ip_block_suspend(&adev->ip_blocks[i]); in amdgpu_device_ip_suspend_phase2()
3612 adev->ip_blocks[i].status.hw = false; in amdgpu_device_ip_suspend_phase2()
3616 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) { in amdgpu_device_ip_suspend_phase2()
3617 r = amdgpu_dpm_set_mp1_state(adev, adev->mp1_state); in amdgpu_device_ip_suspend_phase2()
3620 adev->mp1_state, r); in amdgpu_device_ip_suspend_phase2()
3631 * amdgpu_device_ip_suspend - run suspend for hardware IPs
3674 for (i = 0; i < adev->num_ip_blocks; i++) { in amdgpu_device_ip_reinit_early_sriov()
3678 block = &adev->ip_blocks[i]; in amdgpu_device_ip_reinit_early_sriov()
3679 block->status.hw = false; in amdgpu_device_ip_reinit_early_sriov()
3683 if (block->version->type != ip_order[j] || in amdgpu_device_ip_reinit_early_sriov()
3684 !block->status.valid) in amdgpu_device_ip_reinit_early_sriov()
3687 r = block->version->funcs->hw_init(&adev->ip_blocks[i]); in amdgpu_device_ip_reinit_early_sriov()
3689 dev_err(adev->dev, "RE-INIT-early: %s failed\n", in amdgpu_device_ip_reinit_early_sriov()
3690 block->version->funcs->name); in amdgpu_device_ip_reinit_early_sriov()
3693 block->status.hw = true; in amdgpu_device_ip_reinit_early_sriov()
3723 if (block->status.valid && !block->status.hw) { in amdgpu_device_ip_reinit_late_sriov()
3724 if (block->version->type == AMD_IP_BLOCK_TYPE_SMC) { in amdgpu_device_ip_reinit_late_sriov()
3727 r = block->version->funcs->hw_init(block); in amdgpu_device_ip_reinit_late_sriov()
3731 dev_err(adev->dev, "RE-INIT-late: %s failed\n", in amdgpu_device_ip_reinit_late_sriov()
3732 block->version->funcs->name); in amdgpu_device_ip_reinit_late_sriov()
3735 block->status.hw = true; in amdgpu_device_ip_reinit_late_sriov()
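Both SR-IOV re-init helpers follow the same shape: an explicit ip_order[] array dictates the hardware bring-up sequence, and each matching registered block is hw_init'ed exactly once. A hedged sketch of that double loop with simplified stand-in types:

#include <stdbool.h>
#include <stdio.h>

struct ip_block {
    int type;
    bool valid, hw;
    const char *name;
};

/* Bring blocks up in the order given by ip_order[], as the SR-IOV
 * re-init helpers do, instead of registration order. */
static void reinit_in_order(struct ip_block *blk, int nblk,
                            const int *ip_order, int norder)
{
    for (int j = 0; j < norder; j++) {
        for (int i = 0; i < nblk; i++) {
            if (blk[i].type != ip_order[j] ||
                !blk[i].valid || blk[i].hw)
                continue;
            printf("hw_init %s\n", blk[i].name);
            blk[i].hw = true;
        }
    }
}

int main(void)
{
    struct ip_block b[] = {
        { 2, true, false, "ih"  },
        { 1, true, false, "gmc" },
    };
    int order[] = { 1, 2 };     /* gmc first, despite registration order */

    reinit_in_order(b, 2, order, 2);
    return 0;
}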
3743 * amdgpu_device_ip_resume_phase1 - run resume for hardware IPs
3758 for (i = 0; i < adev->num_ip_blocks; i++) { in amdgpu_device_ip_resume_phase1()
3759 if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw) in amdgpu_device_ip_resume_phase1()
3761 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON || in amdgpu_device_ip_resume_phase1()
3762 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC || in amdgpu_device_ip_resume_phase1()
3763 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH || in amdgpu_device_ip_resume_phase1()
3764 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP && amdgpu_sriov_vf(adev))) { in amdgpu_device_ip_resume_phase1()
3766 r = amdgpu_ip_block_resume(&adev->ip_blocks[i]); in amdgpu_device_ip_resume_phase1()
3776 * amdgpu_device_ip_resume_phase2 - run resume for hardware IPs
3792 for (i = 0; i < adev->num_ip_blocks; i++) { in amdgpu_device_ip_resume_phase2()
3793 if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw) in amdgpu_device_ip_resume_phase2()
3795 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON || in amdgpu_device_ip_resume_phase2()
3796 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC || in amdgpu_device_ip_resume_phase2()
3797 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH || in amdgpu_device_ip_resume_phase2()
3798 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE || in amdgpu_device_ip_resume_phase2()
3799 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) in amdgpu_device_ip_resume_phase2()
3801 r = amdgpu_ip_block_resume(&adev->ip_blocks[i]); in amdgpu_device_ip_resume_phase2()
3810 * amdgpu_device_ip_resume_phase3 - run resume for hardware IPs
3826 for (i = 0; i < adev->num_ip_blocks; i++) { in amdgpu_device_ip_resume_phase3()
3827 if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw) in amdgpu_device_ip_resume_phase3()
3829 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) { in amdgpu_device_ip_resume_phase3()
3830 r = amdgpu_ip_block_resume(&adev->ip_blocks[i]); in amdgpu_device_ip_resume_phase3()
3840 * amdgpu_device_ip_resume - run resume for hardware IPs
3865 if (adev->mman.buffer_funcs_ring->sched.ready) in amdgpu_device_ip_resume()
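Resume mirrors suspend but in three passes: phase 1 brings up the fabric essentials (COMMON, GMC, IH, plus PSP under SR-IOV), phase 2 everything else except display, and phase 3 the display (DCE) last, once buffers and rings are usable. A sketch of that classification; on bare metal, PSP is brought up by the firmware-loading step between phases 1 and 2, which this listing does not show and the sketch marks as phase 0:

#include <stdbool.h>
#include <stdio.h>

enum blk { COMMON, GMC, IH, PSP, GFX, SDMA, DCE };

/* Which resume pass handles a block; mirrors the three
 * amdgpu_device_ip_resume_phaseN() filters above. */
static int resume_phase(enum blk b, bool sriov_vf)
{
    if (b == COMMON || b == GMC || b == IH || (b == PSP && sriov_vf))
        return 1;   /* fabric essentials first */
    if (b == PSP)
        return 0;   /* bare metal: fw-loading step between 1 and 2 */
    if (b == DCE)
        return 3;   /* display last */
    return 2;
}

int main(void)
{
    printf("GFX: phase %d, DCE: phase %d\n",
           resume_phase(GFX, false), resume_phase(DCE, false));
    return 0;
}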
3879 * amdgpu_device_detect_sriov_bios - determine if the board supports SR-IOV
3883 * Query the VBIOS data tables to determine if the board supports SR-IOV.
3888 if (adev->is_atom_fw) { in amdgpu_device_detect_sriov_bios()
3890 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS; in amdgpu_device_detect_sriov_bios()
3893 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS; in amdgpu_device_detect_sriov_bios()
3896 if (!(adev->virt.caps & AMDGPU_SRIOV_CAPS_SRIOV_VBIOS)) in amdgpu_device_detect_sriov_bios()
3902 * amdgpu_device_asic_has_dc_support - determine if DC supports the asic
3927 * Fallback to the non-DC driver here by default so as not to in amdgpu_device_asic_has_dc_support()
3943 * Fallback to the non-DC driver here by default so as not to in amdgpu_device_asic_has_dc_support()
3959 * amdgpu_device_has_dc_support - check if dc is supported
3967 if (adev->enable_virtual_display || in amdgpu_device_has_dc_support()
3968 (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK)) in amdgpu_device_has_dc_support()
3971 return amdgpu_device_asic_has_dc_support(adev->asic_type); in amdgpu_device_has_dc_support()
3992 task_barrier_enter(&hive->tb); in amdgpu_device_xgmi_reset_func()
3993 adev->asic_reset_res = amdgpu_device_baco_enter(adev_to_drm(adev)); in amdgpu_device_xgmi_reset_func()
3995 if (adev->asic_reset_res) in amdgpu_device_xgmi_reset_func()
3998 task_barrier_exit(&hive->tb); in amdgpu_device_xgmi_reset_func()
3999 adev->asic_reset_res = amdgpu_device_baco_exit(adev_to_drm(adev)); in amdgpu_device_xgmi_reset_func()
4001 if (adev->asic_reset_res) in amdgpu_device_xgmi_reset_func()
4007 task_barrier_full(&hive->tb); in amdgpu_device_xgmi_reset_func()
4008 adev->asic_reset_res = amdgpu_asic_reset(adev); in amdgpu_device_xgmi_reset_func()
4012 if (adev->asic_reset_res) in amdgpu_device_xgmi_reset_func()
4014 adev->asic_reset_res, adev_to_drm(adev)->unique); in amdgpu_device_xgmi_reset_func()
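The task barrier in the XGMI reset worker makes every node of a hive enter BACO before any node exits it, so no GPU comes back up while its hive partners are still down. A rough user-space analogue, with pthread barriers standing in for task_barrier_enter()/task_barrier_exit() (build with -lpthread):

#include <pthread.h>
#include <stdio.h>

#define NODES 2
static pthread_barrier_t tb;    /* stands in for hive->tb */

static void *xgmi_reset(void *arg)
{
    int node = *(int *)arg;

    pthread_barrier_wait(&tb);          /* task_barrier_enter() */
    printf("node %d: BACO enter\n", node);
    pthread_barrier_wait(&tb);          /* task_barrier_exit() */
    printf("node %d: BACO exit\n", node);
    return NULL;
}

int main(void)
{
    pthread_t t[NODES];
    int id[NODES];

    pthread_barrier_init(&tb, NULL, NODES);
    for (int i = 0; i < NODES; i++) {
        id[i] = i;
        pthread_create(&t[i], NULL, xgmi_reset, &id[i]);
    }
    for (int i = 0; i < NODES; i++)
        pthread_join(t[i], NULL);
    pthread_barrier_destroy(&tb);
    return 0;
}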
4029 * In SR-IOV or passthrough mode, timeout for compute in amdgpu_device_get_job_timeout_settings()
4032 adev->gfx_timeout = msecs_to_jiffies(10000); in amdgpu_device_get_job_timeout_settings()
4033 adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout; in amdgpu_device_get_job_timeout_settings()
4035 adev->compute_timeout = amdgpu_sriov_is_pp_one_vf(adev) ? in amdgpu_device_get_job_timeout_settings()
4038 adev->compute_timeout = msecs_to_jiffies(60000); in amdgpu_device_get_job_timeout_settings()
4052 dev_warn(adev->dev, "lockup timeout disabled"); in amdgpu_device_get_job_timeout_settings()
4060 adev->gfx_timeout = timeout; in amdgpu_device_get_job_timeout_settings()
4063 adev->compute_timeout = timeout; in amdgpu_device_get_job_timeout_settings()
4066 adev->sdma_timeout = timeout; in amdgpu_device_get_job_timeout_settings()
4069 adev->video_timeout = timeout; in amdgpu_device_get_job_timeout_settings()
4077 * it should apply to all non-compute jobs. in amdgpu_device_get_job_timeout_settings()
4080 adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout; in amdgpu_device_get_job_timeout_settings()
4082 adev->compute_timeout = adev->gfx_timeout; in amdgpu_device_get_job_timeout_settings()
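The fragments above amount to: default to 10 s for gfx/sdma/video and 60 s for compute, then let the lockup_timeout parameter override them in the order gfx, compute, sdma, video, with a single value covering all non-compute queues. A simplified stand-alone parser modelling just that rule (the real one also handles disabled timeouts and SR-IOV/passthrough special cases):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct timeouts { long gfx, compute, sdma, video; };

/* Apply "gfx[,compute[,sdma[,video]]]"; a lone value is copied to
 * the other non-compute queues. */
static void parse_timeouts(char *s, struct timeouts *t)
{
    long *slot[4] = { &t->gfx, &t->compute, &t->sdma, &t->video };
    int n = 0;

    for (char *tok = strtok(s, ","); tok && n < 4; tok = strtok(NULL, ","))
        *slot[n++] = strtol(tok, NULL, 10);

    if (n == 1)
        t->sdma = t->video = t->gfx;
}

int main(void)
{
    /* defaults from above: 10 s everywhere, 60 s for compute */
    struct timeouts t = { 10000, 60000, 10000, 10000 };
    char arg[] = "5000";

    parse_timeouts(arg, &t);
    printf("gfx=%ld compute=%ld sdma=%ld video=%ld\n",
           t.gfx, t.compute, t.sdma, t.video);
    return 0;
}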
4090 * amdgpu_device_check_iommu_direct_map - check if RAM is direct-mapped to the GPU
4100 domain = iommu_get_domain_for_dev(adev->dev); in amdgpu_device_check_iommu_direct_map()
4101 if (!domain || domain->type == IOMMU_DOMAIN_IDENTITY) in amdgpu_device_check_iommu_direct_map()
4102 adev->ram_is_direct_mapped = true; in amdgpu_device_check_iommu_direct_map()
4107 * amdgpu_device_check_iommu_remap - Check if DMA remapping is enabled.
4117 domain = iommu_get_domain_for_dev(adev->dev); in amdgpu_device_check_iommu_remap()
4118 if (domain && (domain->type == IOMMU_DOMAIN_DMA || in amdgpu_device_check_iommu_remap()
4119 domain->type == IOMMU_DOMAIN_DMA_FQ)) in amdgpu_device_check_iommu_remap()
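The two IOMMU helpers reduce to domain-type tests: an identity (or absent) domain means RAM is direct-mapped, while a DMA/DMA_FQ domain means addresses are remapped through IOVAs. A kernel-style sketch of the remap predicate, assuming only the iommu API shown above:

#include <linux/iommu.h>

/* True when DMA goes through IOVA remapping rather than an identity
 * map; the direct-map check above is the complementary test. Sketch. */
static bool dma_is_remapped(struct device *dev)
{
    struct iommu_domain *domain = iommu_get_domain_for_dev(dev);

    return domain && (domain->type == IOMMU_DOMAIN_DMA ||
                      domain->type == IOMMU_DOMAIN_DMA_FQ);
}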
4134 adev->gfx.mcbp = true; in amdgpu_device_set_mcbp()
4136 adev->gfx.mcbp = false; in amdgpu_device_set_mcbp()
4139 adev->gfx.mcbp = true; in amdgpu_device_set_mcbp()
4141 if (adev->gfx.mcbp) in amdgpu_device_set_mcbp()
4146 * amdgpu_device_init - initialize the driver
4159 struct pci_dev *pdev = adev->pdev; in amdgpu_device_init()
4165 adev->shutdown = false; in amdgpu_device_init()
4166 adev->flags = flags; in amdgpu_device_init()
4169 adev->asic_type = amdgpu_force_asic_type; in amdgpu_device_init()
4171 adev->asic_type = flags & AMD_ASIC_MASK; in amdgpu_device_init()
4173 adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT; in amdgpu_device_init()
4175 adev->usec_timeout *= 10; in amdgpu_device_init()
4176 adev->gmc.gart_size = 512 * 1024 * 1024; in amdgpu_device_init()
4177 adev->accel_working = false; in amdgpu_device_init()
4178 adev->num_rings = 0; in amdgpu_device_init()
4179 RCU_INIT_POINTER(adev->gang_submit, dma_fence_get_stub()); in amdgpu_device_init()
4180 adev->mman.buffer_funcs = NULL; in amdgpu_device_init()
4181 adev->mman.buffer_funcs_ring = NULL; in amdgpu_device_init()
4182 adev->vm_manager.vm_pte_funcs = NULL; in amdgpu_device_init()
4183 adev->vm_manager.vm_pte_num_scheds = 0; in amdgpu_device_init()
4184 adev->gmc.gmc_funcs = NULL; in amdgpu_device_init()
4185 adev->harvest_ip_mask = 0x0; in amdgpu_device_init()
4186 adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS); in amdgpu_device_init()
4187 bitmap_zero(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES); in amdgpu_device_init()
4189 adev->smc_rreg = &amdgpu_invalid_rreg; in amdgpu_device_init()
4190 adev->smc_wreg = &amdgpu_invalid_wreg; in amdgpu_device_init()
4191 adev->pcie_rreg = &amdgpu_invalid_rreg; in amdgpu_device_init()
4192 adev->pcie_wreg = &amdgpu_invalid_wreg; in amdgpu_device_init()
4193 adev->pcie_rreg_ext = &amdgpu_invalid_rreg_ext; in amdgpu_device_init()
4194 adev->pcie_wreg_ext = &amdgpu_invalid_wreg_ext; in amdgpu_device_init()
4195 adev->pciep_rreg = &amdgpu_invalid_rreg; in amdgpu_device_init()
4196 adev->pciep_wreg = &amdgpu_invalid_wreg; in amdgpu_device_init()
4197 adev->pcie_rreg64 = &amdgpu_invalid_rreg64; in amdgpu_device_init()
4198 adev->pcie_wreg64 = &amdgpu_invalid_wreg64; in amdgpu_device_init()
4199 adev->pcie_rreg64_ext = &amdgpu_invalid_rreg64_ext; in amdgpu_device_init()
4200 adev->pcie_wreg64_ext = &amdgpu_invalid_wreg64_ext; in amdgpu_device_init()
4201 adev->uvd_ctx_rreg = &amdgpu_invalid_rreg; in amdgpu_device_init()
4202 adev->uvd_ctx_wreg = &amdgpu_invalid_wreg; in amdgpu_device_init()
4203 adev->didt_rreg = &amdgpu_invalid_rreg; in amdgpu_device_init()
4204 adev->didt_wreg = &amdgpu_invalid_wreg; in amdgpu_device_init()
4205 adev->gc_cac_rreg = &amdgpu_invalid_rreg; in amdgpu_device_init()
4206 adev->gc_cac_wreg = &amdgpu_invalid_wreg; in amdgpu_device_init()
4207 adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg; in amdgpu_device_init()
4208 adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg; in amdgpu_device_init()
4211 amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device, in amdgpu_device_init()
4212 pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision); in amdgpu_device_init()
4217 mutex_init(&adev->firmware.mutex); in amdgpu_device_init()
4218 mutex_init(&adev->pm.mutex); in amdgpu_device_init()
4219 mutex_init(&adev->gfx.gpu_clock_mutex); in amdgpu_device_init()
4220 mutex_init(&adev->srbm_mutex); in amdgpu_device_init()
4221 mutex_init(&adev->gfx.pipe_reserve_mutex); in amdgpu_device_init()
4222 mutex_init(&adev->gfx.gfx_off_mutex); in amdgpu_device_init()
4223 mutex_init(&adev->gfx.partition_mutex); in amdgpu_device_init()
4224 mutex_init(&adev->grbm_idx_mutex); in amdgpu_device_init()
4225 mutex_init(&adev->mn_lock); in amdgpu_device_init()
4226 mutex_init(&adev->virt.vf_errors.lock); in amdgpu_device_init()
4227 hash_init(adev->mn_hash); in amdgpu_device_init()
4228 mutex_init(&adev->psp.mutex); in amdgpu_device_init()
4229 mutex_init(&adev->notifier_lock); in amdgpu_device_init()
4230 mutex_init(&adev->pm.stable_pstate_ctx_lock); in amdgpu_device_init()
4231 mutex_init(&adev->benchmark_mutex); in amdgpu_device_init()
4232 mutex_init(&adev->gfx.reset_sem_mutex); in amdgpu_device_init()
4233 /* Initialize the mutex for cleaner shader isolation between GFX and compute processes */ in amdgpu_device_init()
4234 mutex_init(&adev->enforce_isolation_mutex); in amdgpu_device_init()
4235 mutex_init(&adev->gfx.kfd_sch_mutex); in amdgpu_device_init()
4243 spin_lock_init(&adev->mmio_idx_lock); in amdgpu_device_init()
4244 spin_lock_init(&adev->smc_idx_lock); in amdgpu_device_init()
4245 spin_lock_init(&adev->pcie_idx_lock); in amdgpu_device_init()
4246 spin_lock_init(&adev->uvd_ctx_idx_lock); in amdgpu_device_init()
4247 spin_lock_init(&adev->didt_idx_lock); in amdgpu_device_init()
4248 spin_lock_init(&adev->gc_cac_idx_lock); in amdgpu_device_init()
4249 spin_lock_init(&adev->se_cac_idx_lock); in amdgpu_device_init()
4250 spin_lock_init(&adev->audio_endpt_idx_lock); in amdgpu_device_init()
4251 spin_lock_init(&adev->mm_stats.lock); in amdgpu_device_init()
4252 spin_lock_init(&adev->virt.rlcg_reg_lock); in amdgpu_device_init()
4253 spin_lock_init(&adev->wb.lock); in amdgpu_device_init()
4255 INIT_LIST_HEAD(&adev->reset_list); in amdgpu_device_init()
4257 INIT_LIST_HEAD(&adev->ras_list); in amdgpu_device_init()
4259 INIT_LIST_HEAD(&adev->pm.od_kobj_list); in amdgpu_device_init()
4261 INIT_DELAYED_WORK(&adev->delayed_init_work, in amdgpu_device_init()
4263 INIT_DELAYED_WORK(&adev->gfx.gfx_off_delay_work, in amdgpu_device_init()
4269 * each GFX and compute ring. If there are any fences, it schedules in amdgpu_device_init()
4275 INIT_DELAYED_WORK(&adev->gfx.enforce_isolation[i].work, in amdgpu_device_init()
4277 adev->gfx.enforce_isolation[i].adev = adev; in amdgpu_device_init()
4278 adev->gfx.enforce_isolation[i].xcp_id = i; in amdgpu_device_init()
4281 INIT_WORK(&adev->xgmi_reset_work, amdgpu_device_xgmi_reset_func); in amdgpu_device_init()
4283 adev->gfx.gfx_off_req_count = 1; in amdgpu_device_init()
4284 adev->gfx.gfx_off_residency = 0; in amdgpu_device_init()
4285 adev->gfx.gfx_off_entrycount = 0; in amdgpu_device_init()
4286 adev->pm.ac_power = power_supply_is_system_supplied() > 0; in amdgpu_device_init()
4288 atomic_set(&adev->throttling_logging_enabled, 1); in amdgpu_device_init()
4291 * to avoid log flooding. "-1" is subtracted since the thermal in amdgpu_device_init()
4296 ratelimit_state_init(&adev->throttling_logging_rs, (60 - 1) * HZ, 1); in amdgpu_device_init()
4297 ratelimit_state_init(&adev->virt.ras_telemetry_rs, 5 * HZ, 1); in amdgpu_device_init()
4299 ratelimit_set_flags(&adev->throttling_logging_rs, RATELIMIT_MSG_ON_RELEASE); in amdgpu_device_init()
4300 ratelimit_set_flags(&adev->virt.ras_telemetry_rs, RATELIMIT_MSG_ON_RELEASE); in amdgpu_device_init()
4304 if (adev->asic_type >= CHIP_BONAIRE) { in amdgpu_device_init()
4305 adev->rmmio_base = pci_resource_start(adev->pdev, 5); in amdgpu_device_init()
4306 adev->rmmio_size = pci_resource_len(adev->pdev, 5); in amdgpu_device_init()
4308 adev->rmmio_base = pci_resource_start(adev->pdev, 2); in amdgpu_device_init()
4309 adev->rmmio_size = pci_resource_len(adev->pdev, 2); in amdgpu_device_init()
4313 atomic_set(&adev->pm.pwr_state[i], POWER_STATE_UNKNOWN); in amdgpu_device_init()
4315 adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size); in amdgpu_device_init()
4316 if (!adev->rmmio) in amdgpu_device_init()
4317 return -ENOMEM; in amdgpu_device_init()
4319 DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base); in amdgpu_device_init()
4320 DRM_INFO("register mmio size: %u\n", (unsigned int)adev->rmmio_size); in amdgpu_device_init()
4327 adev->reset_domain = amdgpu_reset_create_reset_domain(SINGLE_DEVICE, "amdgpu-reset-dev"); in amdgpu_device_init()
4328 if (!adev->reset_domain) in amdgpu_device_init()
4329 return -ENOMEM; in amdgpu_device_init()
4338 dev_err(adev->dev, "invalid lockup_timeout parameter syntax\n"); in amdgpu_device_init()
4356 r = aperture_remove_conflicting_pci_devices(adev->pdev, amdgpu_kms_driver.name); in amdgpu_device_init()
4368 adev->virt.caps |= AMDGPU_VF_MMIO_ACCESS_PROTECT; in amdgpu_device_init()
4372 if (adev->gmc.xgmi.supported) { in amdgpu_device_init()
4373 r = adev->gfxhub.funcs->get_xgmi_info(adev); in amdgpu_device_init()
4380 if (adev->virt.fw_reserve.p_pf2vf) in amdgpu_device_init()
4381 adev->have_atomics_support = ((struct amd_sriov_msg_pf2vf_info *) in amdgpu_device_init()
4382 adev->virt.fw_reserve.p_pf2vf)->pcie_atomic_ops_support_flags == in amdgpu_device_init()
4387 } else if ((adev->flags & AMD_IS_APU) && in amdgpu_device_init()
4390 adev->have_atomics_support = true; in amdgpu_device_init()
4392 adev->have_atomics_support = in amdgpu_device_init()
4393 !pci_enable_atomic_ops_to_root(adev->pdev, in amdgpu_device_init()
4398 if (!adev->have_atomics_support) in amdgpu_device_init()
4399 dev_info(adev->dev, "PCIE atomic ops are not supported\n"); in amdgpu_device_init()
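The atomics probe relies on pci_enable_atomic_ops_to_root() returning 0 only when every bridge between the device and the root port can complete the requested AtomicOps, so a nonzero return simply means "no PCIe atomics". A hedged kernel-style sketch; the exact capability mask here is illustrative, not necessarily the driver's:

#include <linux/pci.h>

/* Returns true when 32- and 64-bit AtomicOp completion is usable on
 * the whole path to the root complex. */
static bool pcie_atomics_usable(struct pci_dev *pdev)
{
    return !pci_enable_atomic_ops_to_root(pdev,
                                          PCI_EXP_DEVCAP2_ATOMIC_COMP32 |
                                          PCI_EXP_DEVCAP2_ATOMIC_COMP64);
}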
4413 if (adev->bios) in amdgpu_device_init()
4420 if (adev->gmc.xgmi.num_physical_nodes) { in amdgpu_device_init()
4421 dev_info(adev->dev, "Pending hive reset.\n"); in amdgpu_device_init()
4438 dev_err(adev->dev, "asic reset on init failed\n"); in amdgpu_device_init()
4445 if (!adev->bios) { in amdgpu_device_init()
4446 dev_err(adev->dev, "no vBIOS found\n"); in amdgpu_device_init()
4447 r = -EINVAL; in amdgpu_device_init()
4453 dev_err(adev->dev, "gpu post error!\n"); in amdgpu_device_init()
4458 if (adev->bios) { in amdgpu_device_init()
4459 if (adev->is_atom_fw) { in amdgpu_device_init()
4463 dev_err(adev->dev, "amdgpu_atomfirmware_get_clock_info failed\n"); in amdgpu_device_init()
4471 dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n"); in amdgpu_device_init()
4485 dev_err(adev->dev, "amdgpu_fence_driver_sw_init failed\n"); in amdgpu_device_init()
4495 dev_err(adev->dev, "amdgpu_device_ip_init failed\n"); in amdgpu_device_init()
4502 dev_info(adev->dev, in amdgpu_device_init()
4504 adev->gfx.config.max_shader_engines, in amdgpu_device_init()
4505 adev->gfx.config.max_sh_per_se, in amdgpu_device_init()
4506 adev->gfx.config.max_cu_per_sh, in amdgpu_device_init()
4507 adev->gfx.cu_info.number); in amdgpu_device_init()
4509 adev->accel_working = true; in amdgpu_device_init()
4519 adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps)); in amdgpu_device_init()
4531 if (adev->init_lvl->level != AMDGPU_INIT_LEVEL_MINIMAL_XGMI) { in amdgpu_device_init()
4534 dev_err(adev->dev, "amdgpu_device_ip_late_init failed\n"); in amdgpu_device_init()
4540 queue_delayed_work(system_wq, &adev->delayed_init_work, in amdgpu_device_init()
4546 flush_delayed_work(&adev->delayed_init_work); in amdgpu_device_init()
4556 drm_err(&adev->ddev, in amdgpu_device_init()
4565 adev->ucode_sysfs_en = false; in amdgpu_device_init()
4568 adev->ucode_sysfs_en = true; in amdgpu_device_init()
4570 r = sysfs_create_files(&adev->dev->kobj, amdgpu_dev_attributes); in amdgpu_device_init()
4572 dev_err(adev->dev, "Could not create amdgpu device attr\n"); in amdgpu_device_init()
4574 r = devm_device_add_group(adev->dev, &amdgpu_board_attrs_group); in amdgpu_device_init()
4576 dev_err(adev->dev, in amdgpu_device_init()
4586 dev_err(adev->dev, "amdgpu_pmu_init failed\n"); in amdgpu_device_init()
4589 if (amdgpu_device_cache_pci_state(adev->pdev)) in amdgpu_device_init()
4596 if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA) in amdgpu_device_init()
4597 vga_client_register(adev->pdev, amdgpu_device_vga_set_decode); in amdgpu_device_init()
4601 if (px || (!dev_is_removable(&adev->pdev->dev) && in amdgpu_device_init()
4603 vga_switcheroo_register_client(adev->pdev, in amdgpu_device_init()
4607 vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain); in amdgpu_device_init()
4609 if (adev->init_lvl->level == AMDGPU_INIT_LEVEL_MINIMAL_XGMI) in amdgpu_device_init()
4614 adev->pm_nb.notifier_call = amdgpu_device_pm_notifier; in amdgpu_device_init()
4615 r = register_pm_notifier(&adev->pm_nb); in amdgpu_device_init()
4630 dev_err(adev->dev, "VF exclusive mode timeout\n"); in amdgpu_device_init()
4632 adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME; in amdgpu_device_init()
4633 adev->virt.ops = NULL; in amdgpu_device_init()
4634 r = -EAGAIN; in amdgpu_device_init()
4648 unmap_mapping_range(adev->ddev.anon_inode->i_mapping, 0, 0, 1); in amdgpu_device_unmap_mmio()
4650 /* Unmap all mapped BARs - doorbell, registers and VRAM */ in amdgpu_device_unmap_mmio()
4653 iounmap(adev->rmmio); in amdgpu_device_unmap_mmio()
4654 adev->rmmio = NULL; in amdgpu_device_unmap_mmio()
4655 if (adev->mman.aper_base_kaddr) in amdgpu_device_unmap_mmio()
4656 iounmap(adev->mman.aper_base_kaddr); in amdgpu_device_unmap_mmio()
4657 adev->mman.aper_base_kaddr = NULL; in amdgpu_device_unmap_mmio()
4660 if (!adev->gmc.xgmi.connected_to_cpu && !adev->gmc.is_app_apu) { in amdgpu_device_unmap_mmio()
4661 arch_phys_wc_del(adev->gmc.vram_mtrr); in amdgpu_device_unmap_mmio()
4662 arch_io_free_memtype_wc(adev->gmc.aper_base, adev->gmc.aper_size); in amdgpu_device_unmap_mmio()
4667 * amdgpu_device_fini_hw - tear down the driver
4676 dev_info(adev->dev, "amdgpu: finishing device.\n"); in amdgpu_device_fini_hw()
4677 flush_delayed_work(&adev->delayed_init_work); in amdgpu_device_fini_hw()
4679 if (adev->mman.initialized) in amdgpu_device_fini_hw()
4680 drain_workqueue(adev->mman.bdev.wq); in amdgpu_device_fini_hw()
4681 adev->shutdown = true; in amdgpu_device_fini_hw()
4683 unregister_pm_notifier(&adev->pm_nb); in amdgpu_device_fini_hw()
4695 if (adev->mode_info.mode_config_initialized) { in amdgpu_device_fini_hw()
4703 if (adev->pm.sysfs_initialized) in amdgpu_device_fini_hw()
4705 if (adev->ucode_sysfs_en) in amdgpu_device_fini_hw()
4707 sysfs_remove_files(&adev->dev->kobj, amdgpu_dev_attributes); in amdgpu_device_fini_hw()
4722 if (adev->mman.initialized) in amdgpu_device_fini_hw()
4723 ttm_device_clear_dma_mappings(&adev->mman.bdev); in amdgpu_device_fini_hw()
4739 amdgpu_ucode_release(&adev->firmware.gpu_info_fw); in amdgpu_device_fini_sw()
4740 adev->accel_working = false; in amdgpu_device_fini_sw()
4741 dma_fence_put(rcu_dereference_protected(adev->gang_submit, true)); in amdgpu_device_fini_sw()
4752 kfree(adev->bios); in amdgpu_device_fini_sw()
4753 adev->bios = NULL; in amdgpu_device_fini_sw()
4755 kfree(adev->fru_info); in amdgpu_device_fini_sw()
4756 adev->fru_info = NULL; in amdgpu_device_fini_sw()
4760 if (px || (!dev_is_removable(&adev->pdev->dev) && in amdgpu_device_fini_sw()
4762 vga_switcheroo_unregister_client(adev->pdev); in amdgpu_device_fini_sw()
4765 vga_switcheroo_fini_domain_pm_ops(adev->dev); in amdgpu_device_fini_sw()
4767 if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA) in amdgpu_device_fini_sw()
4768 vga_client_unregister(adev->pdev); in amdgpu_device_fini_sw()
4772 iounmap(adev->rmmio); in amdgpu_device_fini_sw()
4773 adev->rmmio = NULL; in amdgpu_device_fini_sw()
4779 if (adev->mman.discovery_bin) in amdgpu_device_fini_sw()
4782 amdgpu_reset_put_reset_domain(adev->reset_domain); in amdgpu_device_fini_sw()
4783 adev->reset_domain = NULL; in amdgpu_device_fini_sw()
4785 kfree(adev->pci_state); in amdgpu_device_fini_sw()
4790 * amdgpu_device_evict_resources - evict device resources
4803 if (!adev->in_s4 && (adev->flags & AMD_IS_APU)) in amdgpu_device_evict_resources()
4816 * amdgpu_device_pm_notifier - Notification block for Suspend/Hibernate events
4833 adev->in_s4 = true; in amdgpu_device_pm_notifier()
4838 * This is considered non-fatal at this time because in amdgpu_device_pm_notifier()
4840 * See https://gitlab.freedesktop.org/drm/amd/-/issues/3781 in amdgpu_device_pm_notifier()
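The PM notifier exists mainly so the driver can distinguish hibernation from other suspend flavors before the suspend callbacks run. A minimal sketch of a notifier with that shape; the in_s4 comment marks what the real handler does:

#include <linux/suspend.h>

/* Minimal PM notifier with the same shape as adev->pm_nb: flag
 * hibernation so the suspend path can tell S4 from S3/S0ix. Sketch. */
static int my_pm_notifier(struct notifier_block *nb, unsigned long mode,
                          void *data)
{
    switch (mode) {
    case PM_HIBERNATION_PREPARE:
        /* the real handler sets adev->in_s4 = true here */
        break;
    case PM_POST_HIBERNATION:
        break;
    }
    return NOTIFY_DONE;
}

static struct notifier_block my_pm_nb = {
    .notifier_call = my_pm_notifier,
};
/* registered with register_pm_notifier(&my_pm_nb) at init time */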
4851 * amdgpu_device_prepare - prepare for device suspend
4866 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) in amdgpu_device_prepare()
4874 flush_delayed_work(&adev->gfx.gfx_off_delay_work); in amdgpu_device_prepare()
4876 for (i = 0; i < adev->num_ip_blocks; i++) { in amdgpu_device_prepare()
4877 if (!adev->ip_blocks[i].status.valid) in amdgpu_device_prepare()
4879 if (!adev->ip_blocks[i].version->funcs->prepare_suspend) in amdgpu_device_prepare()
4881 r = adev->ip_blocks[i].version->funcs->prepare_suspend(&adev->ip_blocks[i]); in amdgpu_device_prepare()
4889 adev->in_s0ix = adev->in_s3 = adev->in_s4 = false; in amdgpu_device_prepare()
4895 * amdgpu_device_suspend - initiate device suspend
4898 * @notify_clients: notify in-kernel DRM clients
4909 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) in amdgpu_device_suspend()
4912 adev->in_suspend = true; in amdgpu_device_suspend()
4927 cancel_delayed_work_sync(&adev->delayed_init_work); in amdgpu_device_suspend()
4933 if (!adev->in_s0ix) in amdgpu_device_suspend()
4934 amdgpu_amdkfd_suspend(adev, adev->in_runpm); in amdgpu_device_suspend()
4957 * amdgpu_device_resume - initiate device resume
4960 * @notify_clients: notify in-kernel DRM clients
4977 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) in amdgpu_device_resume()
4980 if (adev->in_s0ix) in amdgpu_device_resume()
4987 dev_err(adev->dev, "amdgpu asic init failed\n"); in amdgpu_device_resume()
4993 dev_err(adev->dev, "amdgpu_device_ip_resume failed (%d).\n", r); in amdgpu_device_resume()
4997 if (!adev->in_s0ix) { in amdgpu_device_resume()
4998 r = amdgpu_amdkfd_resume(adev, adev->in_runpm); in amdgpu_device_resume()
5007 queue_delayed_work(system_wq, &adev->delayed_init_work, in amdgpu_device_resume()
5019 flush_delayed_work(&adev->delayed_init_work); in amdgpu_device_resume()
5026 if (adev->mode_info.num_crtc) { in amdgpu_device_resume()
5037 dev->dev->power.disable_depth++; in amdgpu_device_resume()
5039 if (!adev->dc_enabled) in amdgpu_device_resume()
5044 dev->dev->power.disable_depth--; in amdgpu_device_resume()
5047 adev->in_suspend = false; in amdgpu_device_resume()
5049 if (adev->enable_mes) in amdgpu_device_resume()
5059 * amdgpu_device_ip_check_soft_reset - did soft reset succeed
5079 for (i = 0; i < adev->num_ip_blocks; i++) { in amdgpu_device_ip_check_soft_reset()
5080 if (!adev->ip_blocks[i].status.valid) in amdgpu_device_ip_check_soft_reset()
5082 if (adev->ip_blocks[i].version->funcs->check_soft_reset) in amdgpu_device_ip_check_soft_reset()
5083 adev->ip_blocks[i].status.hang = in amdgpu_device_ip_check_soft_reset()
5084 adev->ip_blocks[i].version->funcs->check_soft_reset( in amdgpu_device_ip_check_soft_reset()
5085 &adev->ip_blocks[i]); in amdgpu_device_ip_check_soft_reset()
5086 if (adev->ip_blocks[i].status.hang) { in amdgpu_device_ip_check_soft_reset()
5087 dev_info(adev->dev, "IP block:%s is hung!\n", adev->ip_blocks[i].version->funcs->name); in amdgpu_device_ip_check_soft_reset()
5095 * amdgpu_device_ip_pre_soft_reset - prepare for soft reset
5109 for (i = 0; i < adev->num_ip_blocks; i++) { in amdgpu_device_ip_pre_soft_reset()
5110 if (!adev->ip_blocks[i].status.valid) in amdgpu_device_ip_pre_soft_reset()
5112 if (adev->ip_blocks[i].status.hang && in amdgpu_device_ip_pre_soft_reset()
5113 adev->ip_blocks[i].version->funcs->pre_soft_reset) { in amdgpu_device_ip_pre_soft_reset()
5114 r = adev->ip_blocks[i].version->funcs->pre_soft_reset(&adev->ip_blocks[i]); in amdgpu_device_ip_pre_soft_reset()
5124 * amdgpu_device_ip_need_full_reset - check if a full asic reset is needed
5139 for (i = 0; i < adev->num_ip_blocks; i++) { in amdgpu_device_ip_need_full_reset()
5140 if (!adev->ip_blocks[i].status.valid) in amdgpu_device_ip_need_full_reset()
5142 if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) || in amdgpu_device_ip_need_full_reset()
5143 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) || in amdgpu_device_ip_need_full_reset()
5144 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_ACP) || in amdgpu_device_ip_need_full_reset()
5145 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) || in amdgpu_device_ip_need_full_reset()
5146 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) { in amdgpu_device_ip_need_full_reset()
5147 if (adev->ip_blocks[i].status.hang) { in amdgpu_device_ip_need_full_reset()
5148 dev_info(adev->dev, "Some blocks need full reset!\n"); in amdgpu_device_ip_need_full_reset()
5157 * amdgpu_device_ip_soft_reset - do a soft reset
5171 for (i = 0; i < adev->num_ip_blocks; i++) { in amdgpu_device_ip_soft_reset()
5172 if (!adev->ip_blocks[i].status.valid) in amdgpu_device_ip_soft_reset()
5174 if (adev->ip_blocks[i].status.hang && in amdgpu_device_ip_soft_reset()
5175 adev->ip_blocks[i].version->funcs->soft_reset) { in amdgpu_device_ip_soft_reset()
5176 r = adev->ip_blocks[i].version->funcs->soft_reset(&adev->ip_blocks[i]); in amdgpu_device_ip_soft_reset()
5186 * amdgpu_device_ip_post_soft_reset - clean up from soft reset
5200 for (i = 0; i < adev->num_ip_blocks; i++) { in amdgpu_device_ip_post_soft_reset()
5201 if (!adev->ip_blocks[i].status.valid) in amdgpu_device_ip_post_soft_reset()
5203 if (adev->ip_blocks[i].status.hang && in amdgpu_device_ip_post_soft_reset()
5204 adev->ip_blocks[i].version->funcs->post_soft_reset) in amdgpu_device_ip_post_soft_reset()
5205 r = adev->ip_blocks[i].version->funcs->post_soft_reset(&adev->ip_blocks[i]); in amdgpu_device_ip_post_soft_reset()
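The four helpers above form one pipeline: check_soft_reset marks hung blocks, need_full_reset escalates when a core block (GMC, SMC, ACP, DCE, PSP) is among them, and otherwise the pre/soft/post reset hooks run only on the hung blocks. A compact stand-alone model of that decision, collapsing the separate helpers into one function:

#include <stdbool.h>
#include <stdio.h>

struct ip {
    const char *name;
    bool hang;
    bool core;      /* GMC/SMC/ACP/DCE/PSP escalate to full reset */
};

static bool try_soft_reset(struct ip *ip, int n)
{
    for (int i = 0; i < n; i++)
        if (ip[i].hang && ip[i].core)
            return false;   /* needs a full ASIC reset */
    for (int i = 0; i < n; i++)
        if (ip[i].hang)
            printf("pre/soft/post reset on %s\n", ip[i].name);
    return true;
}

int main(void)
{
    struct ip blocks[] = {
        { "gfx", true,  false },
        { "gmc", false, true  },
    };
    printf("soft reset %s\n",
           try_soft_reset(blocks, 2) ? "sufficient" : "insufficient");
    return 0;
}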
5214 * amdgpu_device_reset_sriov - reset ASIC for SR-IOV vf
5228 if (test_bit(AMDGPU_HOST_FLR, &reset_context->flags)) { in amdgpu_device_reset_sriov()
5232 clear_bit(AMDGPU_HOST_FLR, &reset_context->flags); in amdgpu_device_reset_sriov()
5264 if (hive && adev->gmc.xgmi.num_physical_nodes > 1) in amdgpu_device_reset_sriov()
5275 if (adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) in amdgpu_device_reset_sriov()
5279 * bare-metal does. in amdgpu_device_reset_sriov()
5297 * amdgpu_device_has_job_running - check if there is any unfinished job
5311 struct amdgpu_ring *ring = adev->rings[i]; in amdgpu_device_has_job_running()
5323 * amdgpu_device_should_recover_gpu - check if we should try GPU recovery
5343 if (amdgpu_gpu_recovery == -1) { in amdgpu_device_should_recover_gpu()
5344 switch (adev->asic_type) { in amdgpu_device_should_recover_gpu()
5369 dev_info(adev->dev, "GPU recovery disabled.\n"); in amdgpu_device_should_recover_gpu()
5380 dev_info(adev->dev, "GPU mode1 reset\n"); in amdgpu_device_mode1_reset()
5383 * values are used in other cases like restore after mode-2 reset. in amdgpu_device_mode1_reset()
5385 amdgpu_device_cache_pci_state(adev->pdev); in amdgpu_device_mode1_reset()
5388 pci_clear_master(adev->pdev); in amdgpu_device_mode1_reset()
5391 dev_info(adev->dev, "GPU smu mode1 reset\n"); in amdgpu_device_mode1_reset()
5394 dev_info(adev->dev, "GPU psp mode1 reset\n"); in amdgpu_device_mode1_reset()
5401 amdgpu_device_load_pci_state(adev->pdev); in amdgpu_device_mode1_reset()
5407 for (i = 0; i < adev->usec_timeout; i++) { in amdgpu_device_mode1_reset()
5408 u32 memsize = adev->nbio.funcs->get_memsize(adev); in amdgpu_device_mode1_reset()
5415 if (i >= adev->usec_timeout) { in amdgpu_device_mode1_reset()
5416 ret = -ETIMEDOUT; in amdgpu_device_mode1_reset()
5425 dev_err(adev->dev, "GPU mode1 reset failed\n"); in amdgpu_device_mode1_reset()
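After a mode1 reset the register bus reads back all-ones until the ASIC re-appears, so the driver polls the NBIO memsize register under a microsecond budget. A user-space model of that bounded poll, with a fake register read standing in for get_memsize():

#include <stdint.h>
#include <stdio.h>

#define USEC_TIMEOUT 100000     /* stand-in for adev->usec_timeout */

/* Fake MMIO read; pretends the device re-appears after a few polls. */
static uint32_t read_memsize(void)
{
    static int calls;
    return ++calls < 5 ? 0xffffffffu : 0x4000u;
}

static int wait_for_asic(void)
{
    for (int i = 0; i < USEC_TIMEOUT; i++) {
        if (read_memsize() != 0xffffffffu)
            return 0;
        /* udelay(1) in the kernel */
    }
    return -1;  /* maps to -ETIMEDOUT in the driver */
}

int main(void)
{
    printf("asic %s\n", wait_for_asic() ? "timed out" : "alive");
    return 0;
}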
5434 struct amdgpu_device *tmp_adev = reset_context->reset_req_dev; in amdgpu_device_pre_asic_reset()
5436 test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags); in amdgpu_device_pre_asic_reset()
5438 if (reset_context->reset_req_dev == adev) in amdgpu_device_pre_asic_reset()
5439 job = reset_context->job; in amdgpu_device_pre_asic_reset()
5448 struct amdgpu_ring *ring = adev->rings[i]; in amdgpu_device_pre_asic_reset()
5464 if (job && job->vm) in amdgpu_device_pre_asic_reset()
5465 drm_sched_increase_karma(&job->base); in amdgpu_device_pre_asic_reset()
5469 if (r == -EOPNOTSUPP) in amdgpu_device_pre_asic_reset()
5486 dev_info(adev->dev, "soft reset failed, will fallback to full reset!\n"); in amdgpu_device_pre_asic_reset()
5491 if (!test_bit(AMDGPU_SKIP_COREDUMP, &reset_context->flags)) { in amdgpu_device_pre_asic_reset()
5492 dev_info(tmp_adev->dev, "Dumping IP State\n"); in amdgpu_device_pre_asic_reset()
5494 for (i = 0; i < tmp_adev->num_ip_blocks; i++) in amdgpu_device_pre_asic_reset()
5495 if (tmp_adev->ip_blocks[i].version->funcs->dump_ip_state) in amdgpu_device_pre_asic_reset()
5496 tmp_adev->ip_blocks[i].version->funcs in amdgpu_device_pre_asic_reset()
5497 ->dump_ip_state((void *)&tmp_adev->ip_blocks[i]); in amdgpu_device_pre_asic_reset()
5498 dev_info(tmp_adev->dev, "Dumping IP State Completed\n"); in amdgpu_device_pre_asic_reset()
5504 set_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags); in amdgpu_device_pre_asic_reset()
5507 &reset_context->flags); in amdgpu_device_pre_asic_reset()
5520 device_list_handle = reset_context->reset_device_list; in amdgpu_device_reinit_after_reset()
5523 return -EINVAL; in amdgpu_device_reinit_after_reset()
5525 full_reset = test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags); in amdgpu_device_reinit_after_reset()
5531 if (reset_context->method == AMD_RESET_METHOD_ON_INIT) in amdgpu_device_reinit_after_reset()
5544 dev_warn(tmp_adev->dev, "asic atom init failed!"); in amdgpu_device_reinit_after_reset()
5546 dev_info(tmp_adev->dev, "GPU reset succeeded, trying to resume\n"); in amdgpu_device_reinit_after_reset()
5554 if (!test_bit(AMDGPU_SKIP_COREDUMP, &reset_context->flags)) in amdgpu_device_reinit_after_reset()
5555 amdgpu_coredump(tmp_adev, false, vram_lost, reset_context->job); in amdgpu_device_reinit_after_reset()
5567 tmp_adev->xcp_mgr); in amdgpu_device_reinit_after_reset()
5575 if (tmp_adev->mman.buffer_funcs_ring->sched.ready) in amdgpu_device_reinit_after_reset()
5591 if (!reset_context->hive && in amdgpu_device_reinit_after_reset()
5592 tmp_adev->gmc.xgmi.num_physical_nodes > 1) in amdgpu_device_reinit_after_reset()
5615 r = -EINVAL; in amdgpu_device_reinit_after_reset()
5620 if (reset_context->hive && in amdgpu_device_reinit_after_reset()
5621 tmp_adev->gmc.xgmi.num_physical_nodes > 1) in amdgpu_device_reinit_after_reset()
5623 reset_context->hive, tmp_adev); in amdgpu_device_reinit_after_reset()
5635 dev_err(tmp_adev->dev, "ib ring test failed (%d).\n", r); in amdgpu_device_reinit_after_reset()
5636 r = -EAGAIN; in amdgpu_device_reinit_after_reset()
5642 tmp_adev->asic_reset_res = r; in amdgpu_device_reinit_after_reset()
5660 reset_context->reset_device_list = device_list_handle; in amdgpu_do_asic_reset()
5663 if (r == -EOPNOTSUPP) in amdgpu_do_asic_reset()
5670 test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags); in amdgpu_do_asic_reset()
5671 skip_hw_reset = test_bit(AMDGPU_SKIP_HW_RESET, &reset_context->flags); in amdgpu_do_asic_reset()
5680 if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) { in amdgpu_do_asic_reset()
5682 &tmp_adev->xgmi_reset_work)) in amdgpu_do_asic_reset()
5683 r = -EALREADY; in amdgpu_do_asic_reset()
5688 dev_err(tmp_adev->dev, in amdgpu_do_asic_reset()
5690 r, adev_to_drm(tmp_adev)->unique); in amdgpu_do_asic_reset()
5699 if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) { in amdgpu_do_asic_reset()
5700 flush_work(&tmp_adev->xgmi_reset_work); in amdgpu_do_asic_reset()
5701 r = tmp_adev->asic_reset_res; in amdgpu_do_asic_reset()
5719 if (r == -EAGAIN) in amdgpu_do_asic_reset()
5720 set_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags); in amdgpu_do_asic_reset()
5722 clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags); in amdgpu_do_asic_reset()
5733 adev->mp1_state = PP_MP1_STATE_SHUTDOWN; in amdgpu_device_set_mp1_state()
5736 adev->mp1_state = PP_MP1_STATE_RESET; in amdgpu_device_set_mp1_state()
5739 adev->mp1_state = PP_MP1_STATE_NONE; in amdgpu_device_set_mp1_state()
5747 adev->mp1_state = PP_MP1_STATE_NONE; in amdgpu_device_unset_mp1_state()
5754 p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus), in amdgpu_device_resume_display_audio()
5755 adev->pdev->bus->number, 1); in amdgpu_device_resume_display_audio()
5757 pm_runtime_enable(&(p->dev)); in amdgpu_device_resume_display_audio()
5758 pm_runtime_resume(&(p->dev)); in amdgpu_device_resume_display_audio()
5777 return -EINVAL; in amdgpu_device_suspend_display_audio()
5779 p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus), in amdgpu_device_suspend_display_audio()
5780 adev->pdev->bus->number, 1); in amdgpu_device_suspend_display_audio()
5782 return -ENODEV; in amdgpu_device_suspend_display_audio()
5784 expires = pm_runtime_autosuspend_expiration(&(p->dev)); in amdgpu_device_suspend_display_audio()
5794 while (!pm_runtime_status_suspended(&(p->dev))) { in amdgpu_device_suspend_display_audio()
5795 if (!pm_runtime_suspend(&(p->dev))) in amdgpu_device_suspend_display_audio()
5799 dev_warn(adev->dev, "failed to suspend display audio\n"); in amdgpu_device_suspend_display_audio()
5802 return -ETIMEDOUT; in amdgpu_device_suspend_display_audio()
5806 pm_runtime_disable(&(p->dev)); in amdgpu_device_suspend_display_audio()
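Both audio helpers locate the GPU's HDMI/DP audio controller as PCI function 1 on the same domain and bus, then drive its runtime PM. A sketch of just the lookup; the caller must pci_dev_put() the result after use, as the driver does:

#include <linux/pci.h>

/* The HDMI/DP audio controller sits on function 1 of the GPU's
 * bus; look it up so its runtime PM can be parked first. Sketch. */
static struct pci_dev *gpu_audio_function(struct pci_dev *gpu)
{
    return pci_get_domain_bus_and_slot(pci_domain_nr(gpu->bus),
                                       gpu->bus->number, 1);
}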
5818 cancel_work(&adev->reset_work); in amdgpu_device_stop_pending_resets()
5821 if (adev->kfd.dev) in amdgpu_device_stop_pending_resets()
5822 cancel_work(&adev->kfd.reset_work); in amdgpu_device_stop_pending_resets()
5825 cancel_work(&adev->virt.flr_work); in amdgpu_device_stop_pending_resets()
5827 if (con && adev->ras_enabled) in amdgpu_device_stop_pending_resets()
5828 cancel_work(&con->recovery_work); in amdgpu_device_stop_pending_resets()
5839 pci_read_config_dword(tmp_adev->pdev, PCI_COMMAND, &status); in amdgpu_device_health_check()
5841 dev_err(tmp_adev->dev, "device lost from bus!"); in amdgpu_device_health_check()
5842 ret = -ENODEV; in amdgpu_device_health_check()
5850 * amdgpu_device_gpu_recover - reset the asic and recover scheduler
5857 * Attempt a soft reset or full reset and reinitialize the ASIC in amdgpu_device_gpu_recover()
5880 reset_context->src != AMDGPU_RESET_SRC_RAS) { in amdgpu_device_gpu_recover()
5881 dev_dbg(adev->dev, in amdgpu_device_gpu_recover()
5883 reset_context->src); in amdgpu_device_gpu_recover()
5896 amdgpu_ras_get_context(adev)->reboot) { in amdgpu_device_gpu_recover()
5903 dev_info(adev->dev, "GPU %s begin!\n", in amdgpu_device_gpu_recover()
5909 mutex_lock(&hive->hive_lock); in amdgpu_device_gpu_recover()
5911 reset_context->job = job; in amdgpu_device_gpu_recover()
5912 reset_context->hive = hive; in amdgpu_device_gpu_recover()
5919 if (!amdgpu_sriov_vf(adev) && (adev->gmc.xgmi.num_physical_nodes > 1) && hive) { in amdgpu_device_gpu_recover()
5920 list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) { in amdgpu_device_gpu_recover()
5921 list_add_tail(&tmp_adev->reset_list, &device_list); in amdgpu_device_gpu_recover()
5922 if (adev->shutdown) in amdgpu_device_gpu_recover()
5923 tmp_adev->shutdown = true; in amdgpu_device_gpu_recover()
5925 if (!list_is_first(&adev->reset_list, &device_list)) in amdgpu_device_gpu_recover()
5926 list_rotate_to_front(&adev->reset_list, &device_list); in amdgpu_device_gpu_recover()
5929 list_add_tail(&adev->reset_list, &device_list); in amdgpu_device_gpu_recover()
5942 amdgpu_device_lock_reset_domain(tmp_adev->reset_domain); in amdgpu_device_gpu_recover()
5964 cancel_delayed_work_sync(&tmp_adev->delayed_init_work); in amdgpu_device_gpu_recover()
5982 struct amdgpu_ring *ring = tmp_adev->rings[i]; in amdgpu_device_gpu_recover()
5987 drm_sched_stop(&ring->sched, job ? &job->base : NULL); in amdgpu_device_gpu_recover()
5990 amdgpu_job_stop_all_jobs_on_sched(&ring->sched); in amdgpu_device_gpu_recover()
5992 atomic_inc(&tmp_adev->gpu_reset_counter); in amdgpu_device_gpu_recover()
6002 * job->base holds a reference to parent fence in amdgpu_device_gpu_recover()
6004 if (job && dma_fence_is_signaled(&job->hw_fence)) { in amdgpu_device_gpu_recover()
6006 dev_info(adev->dev, "Guilty job already signaled, skipping HW reset"); in amdgpu_device_gpu_recover()
6015 dev_err(tmp_adev->dev, "GPU pre asic reset failed with err, %d for drm dev, %s ", in amdgpu_device_gpu_recover()
6016 r, adev_to_drm(tmp_adev)->unique); in amdgpu_device_gpu_recover()
6017 tmp_adev->asic_reset_res = r; in amdgpu_device_gpu_recover()
6025 dev_dbg(adev->dev, "Detected RAS error, wait for FLR completion\n"); in amdgpu_device_gpu_recover()
6027 set_bit(AMDGPU_HOST_FLR, &reset_context->flags); in amdgpu_device_gpu_recover()
6031 if (AMDGPU_RETRY_SRIOV_RESET(r) && (retry_limit--) > 0) { in amdgpu_device_gpu_recover()
6036 adev->asic_reset_res = r; in amdgpu_device_gpu_recover()
6039 if (r && r == -EAGAIN) in amdgpu_device_gpu_recover()
6059 struct amdgpu_ring *ring = tmp_adev->rings[i]; in amdgpu_device_gpu_recover()
6064 drm_sched_start(&ring->sched, 0); in amdgpu_device_gpu_recover()
6070 if (tmp_adev->asic_reset_res) in amdgpu_device_gpu_recover()
6071 r = tmp_adev->asic_reset_res; in amdgpu_device_gpu_recover()
6073 tmp_adev->asic_reset_res = 0; in amdgpu_device_gpu_recover()
6080 if (reset_context->src != AMDGPU_RESET_SRC_RAS || in amdgpu_device_gpu_recover()
6082 dev_info(tmp_adev->dev, "GPU reset(%d) failed\n", in amdgpu_device_gpu_recover()
6083 atomic_read(&tmp_adev->gpu_reset_counter)); in amdgpu_device_gpu_recover()
6086 dev_info(tmp_adev->dev, "GPU reset(%d) succeeded!\n", atomic_read(&tmp_adev->gpu_reset_counter)); in amdgpu_device_gpu_recover()
6101 if (!adev->kfd.init_complete) in amdgpu_device_gpu_recover()
6114 amdgpu_device_unlock_reset_domain(tmp_adev->reset_domain); in amdgpu_device_gpu_recover()
6118 mutex_unlock(&hive->hive_lock); in amdgpu_device_gpu_recover()
6123 dev_info(adev->dev, "GPU reset ended with ret = %d\n", r); in amdgpu_device_gpu_recover()
6125 atomic_set(&adev->reset_domain->reset_res, r); in amdgpu_device_gpu_recover()
6130 * amdgpu_device_partner_bandwidth - find the bandwidth of the appropriate partner
6144 struct pci_dev *parent = adev->pdev; in amdgpu_device_partner_bandwidth()
6155 if (parent->vendor == PCI_VENDOR_ID_ATI) in amdgpu_device_partner_bandwidth()
6163 pcie_bandwidth_available(adev->pdev, NULL, speed, width); in amdgpu_device_partner_bandwidth()
6168 * amdgpu_device_gpu_bandwidth - find the bandwidth of the GPU
6181 struct pci_dev *parent = adev->pdev; in amdgpu_device_gpu_bandwidth()
6187 if (parent && parent->vendor == PCI_VENDOR_ID_ATI) { in amdgpu_device_gpu_bandwidth()
6192 if (parent->vendor == PCI_VENDOR_ID_ATI) { in amdgpu_device_gpu_bandwidth()
6200 *speed = pcie_get_speed_cap(adev->pdev); in amdgpu_device_gpu_bandwidth()
6201 *width = pcie_get_width_cap(adev->pdev); in amdgpu_device_gpu_bandwidth()
6206 * amdgpu_device_get_pcie_info - fetch pcie info about the PCIE slot
6220 adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap; in amdgpu_device_get_pcie_info()
6223 adev->pm.pcie_mlw_mask = amdgpu_pcie_lane_cap; in amdgpu_device_get_pcie_info()
6226 if (pci_is_root_bus(adev->pdev->bus) && !amdgpu_passthrough(adev)) { in amdgpu_device_get_pcie_info()
6227 if (adev->pm.pcie_gen_mask == 0) in amdgpu_device_get_pcie_info()
6228 adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK; in amdgpu_device_get_pcie_info()
6229 if (adev->pm.pcie_mlw_mask == 0) in amdgpu_device_get_pcie_info()
6230 adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK; in amdgpu_device_get_pcie_info()
6234 if (adev->pm.pcie_gen_mask && adev->pm.pcie_mlw_mask) in amdgpu_device_get_pcie_info()
6241 if (adev->pm.pcie_gen_mask == 0) { in amdgpu_device_get_pcie_info()
6244 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 | in amdgpu_device_get_pcie_info()
6249 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 | in amdgpu_device_get_pcie_info()
6255 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 | in amdgpu_device_get_pcie_info()
6260 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 | in amdgpu_device_get_pcie_info()
6264 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 | in amdgpu_device_get_pcie_info()
6267 adev->pm.pcie_gen_mask |= CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1; in amdgpu_device_get_pcie_info()
6271 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 | in amdgpu_device_get_pcie_info()
6275 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 | in amdgpu_device_get_pcie_info()
6281 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 | in amdgpu_device_get_pcie_info()
6286 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 | in amdgpu_device_get_pcie_info()
6290 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 | in amdgpu_device_get_pcie_info()
6293 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1; in amdgpu_device_get_pcie_info()
6297 if (adev->pm.pcie_mlw_mask == 0) { in amdgpu_device_get_pcie_info()
6300 adev->pm.pcie_mlw_mask |= AMDGPU_DEFAULT_ASIC_PCIE_MLW_MASK; in amdgpu_device_get_pcie_info()
6304 adev->pm.pcie_mlw_mask |= (CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X32 | in amdgpu_device_get_pcie_info()
6313 adev->pm.pcie_mlw_mask |= (CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X16 | in amdgpu_device_get_pcie_info()
6321 adev->pm.pcie_mlw_mask |= (CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X12 | in amdgpu_device_get_pcie_info()
6328 adev->pm.pcie_mlw_mask |= (CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X8 | in amdgpu_device_get_pcie_info()
6334 adev->pm.pcie_mlw_mask |= (CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X4 | in amdgpu_device_get_pcie_info()
6339 adev->pm.pcie_mlw_mask |= (CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X2 | in amdgpu_device_get_pcie_info()
6343 adev->pm.pcie_mlw_mask |= CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X1; in amdgpu_device_get_pcie_info()
6351 adev->pm.pcie_mlw_mask |= AMDGPU_DEFAULT_PCIE_MLW_MASK; in amdgpu_device_get_pcie_info()
6355 adev->pm.pcie_mlw_mask |= (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 | in amdgpu_device_get_pcie_info()
6364 adev->pm.pcie_mlw_mask |= (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 | in amdgpu_device_get_pcie_info()
6372 adev->pm.pcie_mlw_mask |= (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 | in amdgpu_device_get_pcie_info()
6379 adev->pm.pcie_mlw_mask |= (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 | in amdgpu_device_get_pcie_info()
6385 adev->pm.pcie_mlw_mask |= (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 | in amdgpu_device_get_pcie_info()
6390 adev->pm.pcie_mlw_mask |= (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 | in amdgpu_device_get_pcie_info()
6394 adev->pm.pcie_mlw_mask |= CAIL_PCIE_LINK_WIDTH_SUPPORT_X1; in amdgpu_device_get_pcie_info()
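The long mask-building switch follows a single rule: a link capable of generation N (or width N) also supports every smaller value, so each case ORs in all capability bits up to its cap. The same cumulative-OR idea in miniature, with an illustrative bit layout:

#include <stdio.h>

#define GEN(n) (1u << ((n) - 1))

/* A Gen-N capable link advertises every generation up to N. */
static unsigned int gen_mask(int cap)
{
    unsigned int mask = 0;

    for (int g = 1; g <= cap; g++)
        mask |= GEN(g);
    return mask;
}

int main(void)
{
    printf("gen4 mask: 0x%x\n", gen_mask(4));   /* 0xf */
    return 0;
}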
6404 * amdgpu_device_is_peer_accessible - Check peer access through PCIe BAR
6418 !adev->gmc.xgmi.connected_to_cpu && in amdgpu_device_is_peer_accessible()
6419 !(pci_p2pdma_distance(adev->pdev, peer_adev->dev, false) < 0); in amdgpu_device_is_peer_accessible()
6421 dev_info(adev->dev, "PCIe P2P access from peer device %s is not supported by the chipset\n", in amdgpu_device_is_peer_accessible()
6422 pci_name(peer_adev->pdev)); in amdgpu_device_is_peer_accessible()
6424 bool is_large_bar = adev->gmc.visible_vram_size && in amdgpu_device_is_peer_accessible()
6425 adev->gmc.real_vram_size == adev->gmc.visible_vram_size; in amdgpu_device_is_peer_accessible()
6429 uint64_t address_mask = peer_adev->dev->dma_mask ? in amdgpu_device_is_peer_accessible()
6430 ~*peer_adev->dev->dma_mask : ~((1ULL << 32) - 1); in amdgpu_device_is_peer_accessible()
6432 adev->gmc.aper_base + adev->gmc.aper_size - 1; in amdgpu_device_is_peer_accessible()
6434 p2p_addressable = !(adev->gmc.aper_base & address_mask || in amdgpu_device_is_peer_accessible()
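Condensed, the peer-access test requires a fully CPU-visible VRAM BAR ("large BAR"), a non-negative P2PDMA distance, and an aperture that fits entirely inside the peer's DMA mask. A stand-alone sketch of the BAR and address-mask part, assuming a 2^n - 1 style DMA mask so the bitwise test above is equivalent to a comparison:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool peer_accessible(uint64_t vram, uint64_t visible,
                            uint64_t aper_base, uint64_t aper_size,
                            uint64_t peer_dma_mask)
{
    bool large_bar = visible && visible == vram;
    uint64_t aper_limit = aper_base + aper_size - 1;

    return large_bar && aper_base <= peer_dma_mask &&
           aper_limit <= peer_dma_mask;
}

int main(void)
{
    /* hypothetical 16 GiB card with a resizable BAR enabled */
    uint64_t sz = 16ULL << 30;

    printf("%d\n", peer_accessible(sz, sz, 0x6000000000ULL, sz,
                                   (1ULL << 44) - 1));
    return 0;
}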
6449 return -ENOTSUPP; in amdgpu_device_baco_enter()
6451 if (ras && adev->ras_enabled && in amdgpu_device_baco_enter()
6452 adev->nbio.funcs->enable_doorbell_interrupt) in amdgpu_device_baco_enter()
6453 adev->nbio.funcs->enable_doorbell_interrupt(adev, false); in amdgpu_device_baco_enter()
6465 return -ENOTSUPP; in amdgpu_device_baco_exit()
6471 if (ras && adev->ras_enabled && in amdgpu_device_baco_exit()
6472 adev->nbio.funcs->enable_doorbell_interrupt) in amdgpu_device_baco_exit()
6473 adev->nbio.funcs->enable_doorbell_interrupt(adev, true); in amdgpu_device_baco_exit()
6475 if (amdgpu_passthrough(adev) && adev->nbio.funcs && in amdgpu_device_baco_exit()
6476 adev->nbio.funcs->clear_doorbell_interrupt) in amdgpu_device_baco_exit()
6477 adev->nbio.funcs->clear_doorbell_interrupt(adev); in amdgpu_device_baco_exit()
6483 * amdgpu_pci_error_detected - Called when a PCI error is detected.
6499 if (adev->gmc.xgmi.num_physical_nodes > 1) { in amdgpu_pci_error_detected()
6504 adev->pci_channel_state = state; in amdgpu_pci_error_detected()
6512 * Locking adev->reset_domain->sem will prevent any external access in amdgpu_pci_error_detected()
6515 amdgpu_device_lock_reset_domain(adev->reset_domain); in amdgpu_pci_error_detected()
6523 struct amdgpu_ring *ring = adev->rings[i]; in amdgpu_pci_error_detected()
6528 drm_sched_stop(&ring->sched, NULL); in amdgpu_pci_error_detected()
6530 atomic_inc(&adev->gpu_reset_counter); in amdgpu_pci_error_detected()
6541 * amdgpu_pci_mmio_enabled - Enable MMIO and dump debug registers
6549 /* TODO - dump whatever for debugging purposes */ in amdgpu_pci_mmio_enabled()
6560 * amdgpu_pci_slot_reset - Called when PCI slot has been reset.
6587 list_add_tail(&adev->reset_list, &device_list); in amdgpu_pci_slot_reset()
6596 for (i = 0; i < adev->usec_timeout; i++) { in amdgpu_pci_slot_reset()
6604 r = -ETIME; in amdgpu_pci_slot_reset()
6613 adev->no_hw_access = true; in amdgpu_pci_slot_reset()
6615 adev->no_hw_access = false; in amdgpu_pci_slot_reset()
6623 if (amdgpu_device_cache_pci_state(adev->pdev)) in amdgpu_pci_slot_reset()
6624 pci_restore_state(adev->pdev); in amdgpu_pci_slot_reset()
6630 amdgpu_device_unlock_reset_domain(adev->reset_domain); in amdgpu_pci_slot_reset()
6637 * amdgpu_pci_resume() - resume normal ops after PCI reset
6653 if (adev->pci_channel_state != pci_channel_io_frozen) in amdgpu_pci_resume()
6657 struct amdgpu_ring *ring = adev->rings[i]; in amdgpu_pci_resume()
6662 drm_sched_start(&ring->sched, 0); in amdgpu_pci_resume()
6666 amdgpu_device_unlock_reset_domain(adev->reset_domain); in amdgpu_pci_resume()
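These four callbacks fit the standard struct pci_error_handlers shape; the table itself lives outside this file (in amdgpu_drv.c). A sketch of that wiring, with the handler prototypes spelled out:

#include <linux/pci.h>

pci_ers_result_t amdgpu_pci_error_detected(struct pci_dev *pdev,
                                           pci_channel_state_t state);
pci_ers_result_t amdgpu_pci_mmio_enabled(struct pci_dev *pdev);
pci_ers_result_t amdgpu_pci_slot_reset(struct pci_dev *pdev);
void amdgpu_pci_resume(struct pci_dev *pdev);

static const struct pci_error_handlers amdgpu_pci_err_handler = {
    .error_detected = amdgpu_pci_error_detected,
    .mmio_enabled   = amdgpu_pci_mmio_enabled,
    .slot_reset     = amdgpu_pci_slot_reset,
    .resume         = amdgpu_pci_resume,
};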
6680 kfree(adev->pci_state); in amdgpu_device_cache_pci_state()
6682 adev->pci_state = pci_store_saved_state(pdev); in amdgpu_device_cache_pci_state()
6684 if (!adev->pci_state) { in amdgpu_device_cache_pci_state()
6702 if (!adev->pci_state) in amdgpu_device_load_pci_state()
6705 r = pci_load_saved_state(pdev, adev->pci_state); in amdgpu_device_load_pci_state()
6721 if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev)) in amdgpu_device_flush_hdp()
6724 if (adev->gmc.xgmi.connected_to_cpu) in amdgpu_device_flush_hdp()
6727 if (ring && ring->funcs->emit_hdp_flush) in amdgpu_device_flush_hdp()
6737 if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev)) in amdgpu_device_invalidate_hdp()
6740 if (adev->gmc.xgmi.connected_to_cpu) in amdgpu_device_invalidate_hdp()
6748 return atomic_read(&adev->reset_domain->in_gpu_reset); in amdgpu_in_reset()
6752 * amdgpu_device_halt() - bring hardware to some kind of halt state
6766 * 4. set adev->no_hw_access to avoid potential crashes after step 5
6773 struct pci_dev *pdev = adev->pdev; in amdgpu_device_halt()
6783 adev->no_hw_access = true; in amdgpu_device_halt()
6797 address = adev->nbio.funcs->get_pcie_port_index_offset(adev); in amdgpu_device_pcie_port_rreg()
6798 data = adev->nbio.funcs->get_pcie_port_data_offset(adev); in amdgpu_device_pcie_port_rreg()
6800 spin_lock_irqsave(&adev->pcie_idx_lock, flags); in amdgpu_device_pcie_port_rreg()
6804 spin_unlock_irqrestore(&adev->pcie_idx_lock, flags); in amdgpu_device_pcie_port_rreg()
6813 address = adev->nbio.funcs->get_pcie_port_index_offset(adev); in amdgpu_device_pcie_port_wreg()
6814 data = adev->nbio.funcs->get_pcie_port_data_offset(adev); in amdgpu_device_pcie_port_wreg()
6816 spin_lock_irqsave(&adev->pcie_idx_lock, flags); in amdgpu_device_pcie_port_wreg()
6821 spin_unlock_irqrestore(&adev->pcie_idx_lock, flags); in amdgpu_device_pcie_port_wreg()
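Both port accessors use the classic index/data pair: write the register number to the index offset, post it with a read-back, then access the data offset, all under one irq-safe spinlock so two CPUs cannot interleave the pair. A kernel-style sketch with hypothetical mmio base and offsets:

#include <linux/spinlock.h>
#include <linux/io.h>

static DEFINE_SPINLOCK(idx_lock);

/* Indirect read through an index/data register pair. Sketch only. */
static u32 pcie_port_read(void __iomem *mmio, u32 index_off, u32 data_off,
                          u32 reg)
{
    unsigned long flags;
    u32 val;

    spin_lock_irqsave(&idx_lock, flags);
    writel(reg, mmio + index_off);
    readl(mmio + index_off);    /* post the index write */
    val = readl(mmio + data_off);
    spin_unlock_irqrestore(&idx_lock, flags);
    return val;
}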
6825 * amdgpu_device_get_gang - return a reference to the current gang
6835 fence = dma_fence_get_rcu_safe(&adev->gang_submit); in amdgpu_device_get_gang()
6841 * amdgpu_device_switch_gang - switch to a new gang
6866 } while (cmpxchg((struct dma_fence __force **)&adev->gang_submit, in amdgpu_device_switch_gang()
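The gang switch publishes a new fence with a retry-until-stable compare-and-swap, so a concurrent switch can never be silently overwritten (the driver additionally refuses to replace a still-unsignaled gang). A user-space model of that publish loop using C11 atomics:

#include <stdatomic.h>
#include <stdio.h>

static _Atomic(int *) gang_slot;

/* Keep retrying the CAS until the slot still holds the value we
 * sampled; a failed weak CAS refreshes 'old' automatically. */
static int *switch_gang(int *new_gang)
{
    int *old = atomic_load(&gang_slot);

    while (!atomic_compare_exchange_weak(&gang_slot, &old, new_gang))
        ;   /* lost a race, retry with the refreshed value */
    return old; /* caller drops the old gang's reference */
}

int main(void)
{
    static int a, b;

    gang_slot = &a;
    printf("replaced %p\n", (void *)switch_gang(&b));
    return 0;
}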
6880 switch (adev->asic_type) { in amdgpu_device_has_display_hardware()
6913 (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK)) in amdgpu_device_has_display_hardware()
6926 uint32_t loop = adev->usec_timeout; in amdgpu_device_wait_on_rreg()
6930 loop = adev->usec_timeout; in amdgpu_device_wait_on_rreg()
6935 loop--; in amdgpu_device_wait_on_rreg()
6940 ret = -ETIMEDOUT; in amdgpu_device_wait_on_rreg()
6951 if (!ring || !ring->adev) in amdgpu_get_soft_full_reset_mask()
6954 if (amdgpu_device_should_recover_gpu(ring->adev)) in amdgpu_get_soft_full_reset_mask()
6957 if (unlikely(!ring->adev->debug_disable_soft_recovery) && in amdgpu_get_soft_full_reset_mask()
6958 !amdgpu_sriov_vf(ring->adev) && ring->funcs->soft_recovery) in amdgpu_get_soft_full_reset_mask()