Lines Matching full:pcie

3  * MediaTek PCIe host controller driver.
132 /* Time in ms needed to complete PCIe reset on EN7581 SoC */
149 * @power_up: pcie power_up callback
151 * @flags: pcie device flags.
154 int (*power_up)(struct mtk_gen3_pcie *pcie);
175 * struct mtk_gen3_pcie - PCIe port information
176 * @dev: pointer to PCIe device
182 * @clks: PCIe clocks
183 * @num_clks: PCIe clocks count for this port
184 * @max_link_speed: Maximum link speed (PCIe Gen) for this port
185 * @num_lanes: Number of PCIe lanes for this port
186 * @irq: PCIe controller interrupt number
265 struct mtk_gen3_pcie *pcie = bus->sysdata; in mtk_pcie_config_tlp_header() local
274 writel_relaxed(val, pcie->base + PCIE_CFGNUM_REG); in mtk_pcie_config_tlp_header()
280 struct mtk_gen3_pcie *pcie = bus->sysdata; in mtk_pcie_map_bus() local
282 return pcie->base + PCIE_CFG_OFFSET_ADDR + where; in mtk_pcie_map_bus()
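The two fragments above (mtk_pcie_config_tlp_header() and mtk_pcie_map_bus()) suggest a two-step config access: the target bus/devfn is programmed into PCIE_CFGNUM_REG, and the config data is then read or written through a fixed window at PCIE_CFG_OFFSET_ADDR + where. Below is a minimal, runnable userspace sketch of that pattern; the offsets, the field layout in cfg_header(), and the sample data are placeholders, not the controller's real register map.

```c
#include <stdint.h>
#include <stdio.h>

/* Placeholder offsets standing in for PCIE_CFGNUM_REG / PCIE_CFG_OFFSET_ADDR. */
#define CFGNUM_REG      0x140
#define CFG_OFFSET_ADDR 0x1000

/* Fake MMIO space so the example runs without hardware. */
static uint8_t mmio[0x2000];

static void writel(uint32_t val, unsigned int off) { *(uint32_t *)(mmio + off) = val; }
static uint32_t readl(unsigned int off)            { return *(uint32_t *)(mmio + off); }

/* Hypothetical field layout: bus in bits 8..15, devfn in bits 0..7. */
static uint32_t cfg_header(uint8_t bus, uint8_t devfn)
{
	return ((uint32_t)bus << 8) | devfn;
}

/* Read a config dword: select the target function, then access the window. */
static uint32_t cfg_read32(uint8_t bus, uint8_t devfn, unsigned int where)
{
	writel(cfg_header(bus, devfn), CFGNUM_REG);
	return readl(CFG_OFFSET_ADDR + where);
}

int main(void)
{
	/* Pretend the function behind 00:00.0 has some ID dword at offset 0. */
	*(uint32_t *)(mmio + CFG_OFFSET_ADDR) = 0x080014c3;
	printf("cfg[0] = %#x\n", cfg_read32(0, 0, 0x0));
	return 0;
}
```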
310 static int mtk_pcie_set_trans_table(struct mtk_gen3_pcie *pcie, in mtk_pcie_set_trans_table() argument
334 dev_err(pcie->dev, "illegal table size %#llx\n", in mtk_pcie_set_trans_table()
339 table = pcie->base + PCIE_TRANS_TABLE_BASE_REG + *num * PCIE_ATR_TLB_SET_OFFSET; in mtk_pcie_set_trans_table()
355 dev_dbg(pcie->dev, "set %s trans window[%d]: cpu_addr = %#llx, pci_addr = %#llx, size = %#llx\n", in mtk_pcie_set_trans_table()
366 dev_warn(pcie->dev, "not enough translate table for addr: %#llx, limited to [%d]\n", in mtk_pcie_set_trans_table()
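mtk_pcie_set_trans_table() rejects an "illegal table size" and the caller warns when it runs out of translation entries, which suggests each window maps a power-of-two-sized chunk and larger resources must be split across several table slots. The sketch below illustrates such a splitting policy; the entry count, minimum window size, and "largest power-of-two first" strategy are assumptions for illustration, not the driver's actual limits.

```c
#include <stdint.h>
#include <stdio.h>

#define MAX_TRANS_TABLES 8        /* assumed number of translation entries */
#define MIN_WINDOW_SIZE  0x1000   /* assumed minimum window size (4 KiB) */

/* Largest power-of-two chunk that fits in 'size', used as one window. */
static uint64_t window_size(uint64_t size)
{
	uint64_t chunk = 1;

	while ((chunk << 1) <= size)
		chunk <<= 1;
	return chunk;
}

int main(void)
{
	uint64_t cpu_addr = 0x20000000, pci_addr = 0x20000000;
	uint64_t size = 0x1a00000;   /* 26 MiB resource to map */
	int used = 0;

	while (size >= MIN_WINDOW_SIZE && used < MAX_TRANS_TABLES) {
		uint64_t chunk = window_size(size);

		printf("window[%d]: cpu = %#llx, pci = %#llx, size = %#llx\n", used,
		       (unsigned long long)cpu_addr, (unsigned long long)pci_addr,
		       (unsigned long long)chunk);
		cpu_addr += chunk;
		pci_addr += chunk;
		size -= chunk;
		used++;
	}
	if (size)
		printf("not enough translate table entries, %#llx left unmapped\n",
		       (unsigned long long)size);
	return 0;
}
```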
372 static void mtk_pcie_enable_msi(struct mtk_gen3_pcie *pcie) in mtk_pcie_enable_msi() argument
378 struct mtk_msi_set *msi_set = &pcie->msi_sets[i]; in mtk_pcie_enable_msi()
380 msi_set->base = pcie->base + PCIE_MSI_SET_BASE_REG + in mtk_pcie_enable_msi()
382 msi_set->msg_addr = pcie->reg_base + PCIE_MSI_SET_BASE_REG + in mtk_pcie_enable_msi()
388 pcie->base + PCIE_MSI_SET_ADDR_HI_BASE + in mtk_pcie_enable_msi()
392 val = readl_relaxed(pcie->base + PCIE_MSI_SET_ENABLE_REG); in mtk_pcie_enable_msi()
394 writel_relaxed(val, pcie->base + PCIE_MSI_SET_ENABLE_REG); in mtk_pcie_enable_msi()
396 val = readl_relaxed(pcie->base + PCIE_INT_ENABLE_REG); in mtk_pcie_enable_msi()
398 writel_relaxed(val, pcie->base + PCIE_INT_ENABLE_REG); in mtk_pcie_enable_msi()
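The mtk_pcie_enable_msi() fragments give each MSI set a virtual base (pcie->base + PCIE_MSI_SET_BASE_REG + i * offset) and a matching physical message address derived from pcie->reg_base, so an endpoint write to that address lands in the controller's own register space. A small runnable sketch of that address arithmetic, with the set count, register offset, and per-set stride used here as placeholder values:

```c
#include <stdint.h>
#include <stdio.h>

#define MSI_SET_NUM       8       /* number of MSI sets (assumed) */
#define MSI_SET_BASE_REG  0xc00   /* placeholder for PCIE_MSI_SET_BASE_REG */
#define MSI_SET_OFFSET    0x10    /* placeholder per-set stride */

int main(void)
{
	uint64_t reg_base = 0x11230000;    /* example physical base of the MAC registers */

	/* Each set's MSI message address points back into the controller itself. */
	for (int i = 0; i < MSI_SET_NUM; i++) {
		uint64_t msg_addr = reg_base + MSI_SET_BASE_REG + i * MSI_SET_OFFSET;

		printf("msi set %d: msg_addr = %#llx\n", i,
		       (unsigned long long)msg_addr);
	}
	return 0;
}
```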
401 static int mtk_pcie_startup_port(struct mtk_gen3_pcie *pcie) in mtk_pcie_startup_port() argument
404 struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie); in mtk_pcie_startup_port()
409 /* Set as RC mode and set controller PCIe Gen speed restriction, if any */ in mtk_pcie_startup_port()
410 val = readl_relaxed(pcie->base + PCIE_SETTING_REG); in mtk_pcie_startup_port()
412 if (pcie->max_link_speed) { in mtk_pcie_startup_port()
416 if (pcie->max_link_speed >= 2) in mtk_pcie_startup_port()
418 GENMASK(pcie->max_link_speed - 2, 0)); in mtk_pcie_startup_port()
420 if (pcie->num_lanes) { in mtk_pcie_startup_port()
424 if (pcie->num_lanes > 1) in mtk_pcie_startup_port()
426 GENMASK(fls(pcie->num_lanes >> 2), 0)); in mtk_pcie_startup_port()
428 writel_relaxed(val, pcie->base + PCIE_SETTING_REG); in mtk_pcie_startup_port()
431 if (pcie->max_link_speed) { in mtk_pcie_startup_port()
432 val = readl_relaxed(pcie->base + PCIE_CONF_LINK2_CTL_STS); in mtk_pcie_startup_port()
434 val |= FIELD_PREP(PCIE_CONF_LINK2_LCR2_LINK_SPEED, pcie->max_link_speed); in mtk_pcie_startup_port()
435 writel_relaxed(val, pcie->base + PCIE_CONF_LINK2_CTL_STS); in mtk_pcie_startup_port()
439 val = readl_relaxed(pcie->base + PCIE_PCI_IDS_1); in mtk_pcie_startup_port()
442 writel_relaxed(val, pcie->base + PCIE_PCI_IDS_1); in mtk_pcie_startup_port()
445 val = readl_relaxed(pcie->base + PCIE_INT_ENABLE_REG); in mtk_pcie_startup_port()
447 writel_relaxed(val, pcie->base + PCIE_INT_ENABLE_REG); in mtk_pcie_startup_port()
450 val = readl_relaxed(pcie->base + PCIE_MISC_CTRL_REG); in mtk_pcie_startup_port()
452 writel_relaxed(val, pcie->base + PCIE_MISC_CTRL_REG); in mtk_pcie_startup_port()
456 * causing occasional PCIe link down. In order to overcome the issue, in mtk_pcie_startup_port()
458 * PCIe block is reset using en7523_reset_assert() and in mtk_pcie_startup_port()
461 if (!(pcie->soc->flags & SKIP_PCIE_RSTB)) { in mtk_pcie_startup_port()
463 val = readl_relaxed(pcie->base + PCIE_RST_CTRL_REG); in mtk_pcie_startup_port()
466 writel_relaxed(val, pcie->base + PCIE_RST_CTRL_REG); in mtk_pcie_startup_port()
469 * Described in PCIe CEM specification revision 6.0. in mtk_pcie_startup_port()
479 writel_relaxed(val, pcie->base + PCIE_RST_CTRL_REG); in mtk_pcie_startup_port()
483 err = readl_poll_timeout(pcie->base + PCIE_LINK_STATUS_REG, val, in mtk_pcie_startup_port()
490 val = readl_relaxed(pcie->base + PCIE_LTSSM_STATUS_REG); in mtk_pcie_startup_port()
494 dev_err(pcie->dev, in mtk_pcie_startup_port()
495 "PCIe link down, current LTSSM state: %s (%#x)\n", in mtk_pcie_startup_port()
500 mtk_pcie_enable_msi(pcie); in mtk_pcie_startup_port()
502 /* Set PCIe translation windows */ in mtk_pcie_startup_port()
519 err = mtk_pcie_set_trans_table(pcie, cpu_addr, pci_addr, size, in mtk_pcie_startup_port()
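The startup fragments restrict the link speed with GENMASK(pcie->max_link_speed - 2, 0) and the lane count with GENMASK(fls(pcie->num_lanes >> 2), 0). The runnable example below works through just that bit arithmetic, using local stand-ins for the kernel's GENMASK() and fls() helpers, to show which mask each Gen/lane setting produces:

```c
#include <stdio.h>

/* Local stand-ins for the kernel helpers used in the fragments above. */
#define GENMASK(h, l) (((~0u) << (l)) & (~0u >> (31 - (h))))

static int fls(unsigned int x)   /* find last set bit, 1-based; 0 for x == 0 */
{
	int r = 0;

	while (x) {
		r++;
		x >>= 1;
	}
	return r;
}

int main(void)
{
	/* Gen restriction: only applied when max_link_speed >= 2. */
	for (int speed = 2; speed <= 4; speed++)
		printf("max_link_speed %d -> speed restriction mask %#x\n",
		       speed, GENMASK(speed - 2, 0));

	/* Lane restriction: only applied when more than one lane is used. */
	for (int lanes = 2; lanes <= 4; lanes <<= 1)
		printf("num_lanes %d -> lane restriction mask %#x\n",
		       lanes, GENMASK(fls(lanes >> 2), 0));

	return 0;
}
```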
557 struct mtk_gen3_pcie *pcie = data->domain->host_data; in mtk_compose_msi_msg() local
565 dev_dbg(pcie->dev, "msi#%#lx address_hi %#x address_lo %#x data %d\n", in mtk_compose_msi_msg()
582 struct mtk_gen3_pcie *pcie = data->domain->host_data; in mtk_msi_bottom_irq_mask() local
588 raw_spin_lock_irqsave(&pcie->irq_lock, flags); in mtk_msi_bottom_irq_mask()
592 raw_spin_unlock_irqrestore(&pcie->irq_lock, flags); in mtk_msi_bottom_irq_mask()
598 struct mtk_gen3_pcie *pcie = data->domain->host_data; in mtk_msi_bottom_irq_unmask() local
604 raw_spin_lock_irqsave(&pcie->irq_lock, flags); in mtk_msi_bottom_irq_unmask()
608 raw_spin_unlock_irqrestore(&pcie->irq_lock, flags); in mtk_msi_bottom_irq_unmask()
623 struct mtk_gen3_pcie *pcie = domain->host_data; in mtk_msi_bottom_domain_alloc() local
627 mutex_lock(&pcie->lock); in mtk_msi_bottom_domain_alloc()
629 hwirq = bitmap_find_free_region(pcie->msi_irq_in_use, PCIE_MSI_IRQS_NUM, in mtk_msi_bottom_domain_alloc()
632 mutex_unlock(&pcie->lock); in mtk_msi_bottom_domain_alloc()
638 msi_set = &pcie->msi_sets[set_idx]; in mtk_msi_bottom_domain_alloc()
651 struct mtk_gen3_pcie *pcie = domain->host_data; in mtk_msi_bottom_domain_free() local
654 mutex_lock(&pcie->lock); in mtk_msi_bottom_domain_free()
656 bitmap_release_region(pcie->msi_irq_in_use, data->hwirq, in mtk_msi_bottom_domain_free()
659 mutex_unlock(&pcie->lock); in mtk_msi_bottom_domain_free()
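mtk_msi_bottom_domain_alloc()/free() hand out contiguous hwirq regions from a bitmap under pcie->lock and then pick the backing MSI set from pcie->msi_sets[set_idx]. The userspace sketch below approximates that allocation scheme; the vector totals, the 32-vectors-per-set split, and the simplified region search (the kernel's bitmap_find_free_region() allocates naturally aligned power-of-two regions) are assumptions.

```c
#include <stdio.h>
#include <string.h>

#define MSI_IRQS_NUM      256   /* total vectors (assumed) */
#define MSI_IRQS_PER_SET   32   /* vectors per MSI set (assumed) */

static unsigned char msi_irq_in_use[MSI_IRQS_NUM];

/* Find 'count' consecutive free vectors, mark them used, return the first hwirq. */
static int alloc_hwirq_region(int count)
{
	for (int start = 0; start + count <= MSI_IRQS_NUM; start += count) {
		int busy = 0;

		for (int i = 0; i < count; i++)
			busy |= msi_irq_in_use[start + i];
		if (!busy) {
			memset(&msi_irq_in_use[start], 1, count);
			return start;
		}
	}
	return -1;
}

static void free_hwirq_region(int hwirq, int count)
{
	memset(&msi_irq_in_use[hwirq], 0, count);
}

int main(void)
{
	int hwirq = alloc_hwirq_region(4);

	printf("hwirq %d belongs to MSI set %d, bit %d\n",
	       hwirq, hwirq / MSI_IRQS_PER_SET, hwirq % MSI_IRQS_PER_SET);
	free_hwirq_region(hwirq, 4);
	return 0;
}
```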
671 struct mtk_gen3_pcie *pcie = irq_data_get_irq_chip_data(data); in mtk_intx_mask() local
675 raw_spin_lock_irqsave(&pcie->irq_lock, flags); in mtk_intx_mask()
676 val = readl_relaxed(pcie->base + PCIE_INT_ENABLE_REG); in mtk_intx_mask()
678 writel_relaxed(val, pcie->base + PCIE_INT_ENABLE_REG); in mtk_intx_mask()
679 raw_spin_unlock_irqrestore(&pcie->irq_lock, flags); in mtk_intx_mask()
684 struct mtk_gen3_pcie *pcie = irq_data_get_irq_chip_data(data); in mtk_intx_unmask() local
688 raw_spin_lock_irqsave(&pcie->irq_lock, flags); in mtk_intx_unmask()
689 val = readl_relaxed(pcie->base + PCIE_INT_ENABLE_REG); in mtk_intx_unmask()
691 writel_relaxed(val, pcie->base + PCIE_INT_ENABLE_REG); in mtk_intx_unmask()
692 raw_spin_unlock_irqrestore(&pcie->irq_lock, flags); in mtk_intx_unmask()
705 struct mtk_gen3_pcie *pcie = irq_data_get_irq_chip_data(data); in mtk_intx_eoi() local
709 writel_relaxed(BIT(hwirq), pcie->base + PCIE_INT_STATUS_REG); in mtk_intx_eoi()
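The INTx fragments use two different register idioms: mask/unmask do a read-modify-write of the shared PCIE_INT_ENABLE_REG under pcie->irq_lock, while mtk_intx_eoi() acknowledges by writing only the acked bit to PCIE_INT_STATUS_REG (write-one-to-clear). A condensed sketch of both idioms against fake registers; the bit positions are placeholders, and the locking is noted in comments rather than reproduced.

```c
#include <stdint.h>
#include <stdio.h>

#define INTX_SHIFT 24   /* placeholder position of INTA..INTD in the register */

/* Fake enable/status registers standing in for the MMIO ones. */
static uint32_t int_enable_reg;
static uint32_t int_status_reg = 0xfu << INTX_SHIFT;   /* pretend INTx bits pending */

/*
 * Mask/unmask: read-modify-write of the shared enable register. In the driver
 * this runs under pcie->irq_lock because MSI and INTx code touch the same register.
 */
static void intx_mask(int hwirq)
{
	int_enable_reg &= ~(1u << (hwirq + INTX_SHIFT));
}

static void intx_unmask(int hwirq)
{
	int_enable_reg |= 1u << (hwirq + INTX_SHIFT);
}

/*
 * EOI: the status register is write-one-to-clear, so only the acked bit is
 * written back; no read-modify-write is needed (or safe) here.
 */
static void intx_eoi(int hwirq)
{
	int_status_reg = 1u << (hwirq + INTX_SHIFT);
}

int main(void)
{
	intx_unmask(0);
	printf("enable after unmask: %#x\n", int_enable_reg);
	intx_mask(0);
	printf("enable after mask:   %#x\n", int_enable_reg);
	intx_eoi(0);
	printf("value written to status for EOI: %#x\n", int_status_reg);
	return 0;
}
```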
732 static int mtk_pcie_init_irq_domains(struct mtk_gen3_pcie *pcie) in mtk_pcie_init_irq_domains() argument
734 struct device *dev = pcie->dev; in mtk_pcie_init_irq_domains()
738 raw_spin_lock_init(&pcie->irq_lock); in mtk_pcie_init_irq_domains()
747 pcie->intx_domain = irq_domain_add_linear(intc_node, PCI_NUM_INTX, in mtk_pcie_init_irq_domains()
748 &intx_domain_ops, pcie); in mtk_pcie_init_irq_domains()
749 if (!pcie->intx_domain) { in mtk_pcie_init_irq_domains()
756 mutex_init(&pcie->lock); in mtk_pcie_init_irq_domains()
758 pcie->msi_bottom_domain = irq_domain_add_linear(node, PCIE_MSI_IRQS_NUM, in mtk_pcie_init_irq_domains()
759 &mtk_msi_bottom_domain_ops, pcie); in mtk_pcie_init_irq_domains()
760 if (!pcie->msi_bottom_domain) { in mtk_pcie_init_irq_domains()
766 pcie->msi_domain = pci_msi_create_irq_domain(dev->fwnode, in mtk_pcie_init_irq_domains()
768 pcie->msi_bottom_domain); in mtk_pcie_init_irq_domains()
769 if (!pcie->msi_domain) { in mtk_pcie_init_irq_domains()
779 irq_domain_remove(pcie->msi_bottom_domain); in mtk_pcie_init_irq_domains()
781 irq_domain_remove(pcie->intx_domain); in mtk_pcie_init_irq_domains()
787 static void mtk_pcie_irq_teardown(struct mtk_gen3_pcie *pcie) in mtk_pcie_irq_teardown() argument
789 irq_set_chained_handler_and_data(pcie->irq, NULL, NULL); in mtk_pcie_irq_teardown()
791 if (pcie->intx_domain) in mtk_pcie_irq_teardown()
792 irq_domain_remove(pcie->intx_domain); in mtk_pcie_irq_teardown()
794 if (pcie->msi_domain) in mtk_pcie_irq_teardown()
795 irq_domain_remove(pcie->msi_domain); in mtk_pcie_irq_teardown()
797 if (pcie->msi_bottom_domain) in mtk_pcie_irq_teardown()
798 irq_domain_remove(pcie->msi_bottom_domain); in mtk_pcie_irq_teardown()
800 irq_dispose_mapping(pcie->irq); in mtk_pcie_irq_teardown()
803 static void mtk_pcie_msi_handler(struct mtk_gen3_pcie *pcie, int set_idx) in mtk_pcie_msi_handler() argument
805 struct mtk_msi_set *msi_set = &pcie->msi_sets[set_idx]; in mtk_pcie_msi_handler()
820 generic_handle_domain_irq(pcie->msi_bottom_domain, hwirq); in mtk_pcie_msi_handler()
827 struct mtk_gen3_pcie *pcie = irq_desc_get_handler_data(desc); in mtk_pcie_irq_handler() local
834 status = readl_relaxed(pcie->base + PCIE_INT_STATUS_REG); in mtk_pcie_irq_handler()
837 generic_handle_domain_irq(pcie->intx_domain, in mtk_pcie_irq_handler()
843 mtk_pcie_msi_handler(pcie, irq_bit - PCIE_MSI_SHIFT); in mtk_pcie_irq_handler()
845 writel_relaxed(BIT(irq_bit), pcie->base + PCIE_INT_STATUS_REG); in mtk_pcie_irq_handler()
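mtk_pcie_irq_handler() reads a single status register and fans the set bits out either to the INTx domain or, for bits at or above PCIE_MSI_SHIFT, to the per-set MSI handler, writing BIT(irq_bit) back to the status register. A standalone sketch of that demultiplexing loop with placeholder bit positions and set counts:

```c
#include <stdint.h>
#include <stdio.h>

#define INTX_SHIFT 24   /* placeholder: first INTx bit */
#define INTX_NUM    4   /* INTA..INTD */
#define MSI_SHIFT   8   /* placeholder: first MSI-set bit */
#define MSI_SETS    8

static void handle_intx(int intx)    { printf("dispatch INT%c\n", 'A' + intx); }
static void handle_msi_set(int set)  { printf("dispatch MSI set %d\n", set); }

/* Walk the status word and route each pending bit to its handler. */
static void demux(uint32_t status)
{
	for (int bit = 0; bit < 32; bit++) {
		if (!(status & (1u << bit)))
			continue;

		if (bit >= INTX_SHIFT && bit < INTX_SHIFT + INTX_NUM)
			handle_intx(bit - INTX_SHIFT);
		else if (bit >= MSI_SHIFT && bit < MSI_SHIFT + MSI_SETS)
			handle_msi_set(bit - MSI_SHIFT);

		/* The real handler then writes BIT(bit) back (write-one-to-clear). */
	}
}

int main(void)
{
	demux((1u << (INTX_SHIFT + 1)) | (1u << (MSI_SHIFT + 3)));
	return 0;
}
```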
851 static int mtk_pcie_setup_irq(struct mtk_gen3_pcie *pcie) in mtk_pcie_setup_irq() argument
853 struct device *dev = pcie->dev; in mtk_pcie_setup_irq()
857 err = mtk_pcie_init_irq_domains(pcie); in mtk_pcie_setup_irq()
861 pcie->irq = platform_get_irq(pdev, 0); in mtk_pcie_setup_irq()
862 if (pcie->irq < 0) in mtk_pcie_setup_irq()
863 return pcie->irq; in mtk_pcie_setup_irq()
865 irq_set_chained_handler_and_data(pcie->irq, mtk_pcie_irq_handler, pcie); in mtk_pcie_setup_irq()
870 static int mtk_pcie_parse_port(struct mtk_gen3_pcie *pcie) in mtk_pcie_parse_port() argument
872 int i, ret, num_resets = pcie->soc->phy_resets.num_resets; in mtk_pcie_parse_port()
873 struct device *dev = pcie->dev; in mtk_pcie_parse_port()
878 regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pcie-mac"); in mtk_pcie_parse_port()
881 pcie->base = devm_ioremap_resource(dev, regs); in mtk_pcie_parse_port()
882 if (IS_ERR(pcie->base)) { in mtk_pcie_parse_port()
884 return PTR_ERR(pcie->base); in mtk_pcie_parse_port()
887 pcie->reg_base = regs->start; in mtk_pcie_parse_port()
890 pcie->phy_resets[i].id = pcie->soc->phy_resets.id[i]; in mtk_pcie_parse_port()
892 ret = devm_reset_control_bulk_get_optional_shared(dev, num_resets, pcie->phy_resets); in mtk_pcie_parse_port()
898 pcie->mac_reset = devm_reset_control_get_optional_exclusive(dev, "mac"); in mtk_pcie_parse_port()
899 if (IS_ERR(pcie->mac_reset)) { in mtk_pcie_parse_port()
900 ret = PTR_ERR(pcie->mac_reset); in mtk_pcie_parse_port()
907 pcie->phy = devm_phy_optional_get(dev, "pcie-phy"); in mtk_pcie_parse_port()
908 if (IS_ERR(pcie->phy)) { in mtk_pcie_parse_port()
909 ret = PTR_ERR(pcie->phy); in mtk_pcie_parse_port()
916 pcie->num_clks = devm_clk_bulk_get_all(dev, &pcie->clks); in mtk_pcie_parse_port()
917 if (pcie->num_clks < 0) { in mtk_pcie_parse_port()
919 return pcie->num_clks; in mtk_pcie_parse_port()
927 pcie->num_lanes = num_lanes; in mtk_pcie_parse_port()
933 static int mtk_pcie_en7581_power_up(struct mtk_gen3_pcie *pcie) in mtk_pcie_en7581_power_up() argument
935 struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie); in mtk_pcie_en7581_power_up()
936 struct device *dev = pcie->dev; in mtk_pcie_en7581_power_up()
947 reset_control_bulk_assert(pcie->soc->phy_resets.num_resets, in mtk_pcie_en7581_power_up()
948 pcie->phy_resets); in mtk_pcie_en7581_power_up()
949 reset_control_assert(pcie->mac_reset); in mtk_pcie_en7581_power_up()
956 * hw to detect if a given address is accessible on PCIe controller. in mtk_pcie_en7581_power_up()
978 err = phy_init(pcie->phy); in mtk_pcie_en7581_power_up()
984 err = phy_power_on(pcie->phy); in mtk_pcie_en7581_power_up()
990 err = reset_control_bulk_deassert(pcie->soc->phy_resets.num_resets, pcie->phy_resets); in mtk_pcie_en7581_power_up()
1009 writel_relaxed(val, pcie->base + PCIE_EQ_PRESET_01_REG); in mtk_pcie_en7581_power_up()
1015 writel_relaxed(val, pcie->base + PCIE_PIPE4_PIE8_REG); in mtk_pcie_en7581_power_up()
1017 err = clk_bulk_prepare_enable(pcie->num_clks, pcie->clks); in mtk_pcie_en7581_power_up()
1024 * Airoha EN7581 performs PCIe reset via clk callbacks since it has a in mtk_pcie_en7581_power_up()
1026 * complete the PCIe reset. in mtk_pcie_en7581_power_up()
1035 reset_control_bulk_assert(pcie->soc->phy_resets.num_resets, pcie->phy_resets); in mtk_pcie_en7581_power_up()
1037 phy_power_off(pcie->phy); in mtk_pcie_en7581_power_up()
1039 phy_exit(pcie->phy); in mtk_pcie_en7581_power_up()
1044 static int mtk_pcie_power_up(struct mtk_gen3_pcie *pcie) in mtk_pcie_power_up() argument
1046 struct device *dev = pcie->dev; in mtk_pcie_power_up()
1053 reset_control_bulk_assert(pcie->soc->phy_resets.num_resets, in mtk_pcie_power_up()
1054 pcie->phy_resets); in mtk_pcie_power_up()
1055 reset_control_assert(pcie->mac_reset); in mtk_pcie_power_up()
1059 err = reset_control_bulk_deassert(pcie->soc->phy_resets.num_resets, pcie->phy_resets); in mtk_pcie_power_up()
1065 err = phy_init(pcie->phy); in mtk_pcie_power_up()
1071 err = phy_power_on(pcie->phy); in mtk_pcie_power_up()
1078 reset_control_deassert(pcie->mac_reset); in mtk_pcie_power_up()
1083 err = clk_bulk_prepare_enable(pcie->num_clks, pcie->clks); in mtk_pcie_power_up()
1094 reset_control_assert(pcie->mac_reset); in mtk_pcie_power_up()
1095 phy_power_off(pcie->phy); in mtk_pcie_power_up()
1097 phy_exit(pcie->phy); in mtk_pcie_power_up()
1099 reset_control_bulk_assert(pcie->soc->phy_resets.num_resets, pcie->phy_resets); in mtk_pcie_power_up()
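Both power_up paths unwind in reverse order when a later step fails: a clock enable failure ends up powering off and exiting the PHY and re-asserting the PHY resets. A compact illustration of that goto-based unwind idiom with stand-in resource functions (the step names and failure point are invented for the example):

```c
#include <stdio.h>

/* Stand-ins for reset/PHY/clock operations; each step can fail. */
static int phy_init_step(void)     { return 0; }
static int phy_power_on_step(void) { return 0; }
static int clk_enable_step(void)   { return -1; }   /* simulate a failure */

static int power_up(void)
{
	int err;

	err = phy_init_step();
	if (err)
		goto err_phy_init;

	err = phy_power_on_step();
	if (err)
		goto err_phy_on;

	err = clk_enable_step();
	if (err)
		goto err_clk;

	return 0;

	/* Unwind strictly in reverse order of what succeeded. */
err_clk:
	printf("phy_power_off\n");
err_phy_on:
	printf("phy_exit\n");
err_phy_init:
	printf("assert phy resets\n");
	return err;
}

int main(void)
{
	printf("power_up() = %d\n", power_up());
	return 0;
}
```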
1104 static void mtk_pcie_power_down(struct mtk_gen3_pcie *pcie) in mtk_pcie_power_down() argument
1106 clk_bulk_disable_unprepare(pcie->num_clks, pcie->clks); in mtk_pcie_power_down()
1108 pm_runtime_put_sync(pcie->dev); in mtk_pcie_power_down()
1109 pm_runtime_disable(pcie->dev); in mtk_pcie_power_down()
1110 reset_control_assert(pcie->mac_reset); in mtk_pcie_power_down()
1112 phy_power_off(pcie->phy); in mtk_pcie_power_down()
1113 phy_exit(pcie->phy); in mtk_pcie_power_down()
1114 reset_control_bulk_assert(pcie->soc->phy_resets.num_resets, pcie->phy_resets); in mtk_pcie_power_down()
1117 static int mtk_pcie_get_controller_max_link_speed(struct mtk_gen3_pcie *pcie) in mtk_pcie_get_controller_max_link_speed() argument
1122 val = readl_relaxed(pcie->base + PCIE_BASE_CFG_REG); in mtk_pcie_get_controller_max_link_speed()
1129 static int mtk_pcie_setup(struct mtk_gen3_pcie *pcie) in mtk_pcie_setup() argument
1133 err = mtk_pcie_parse_port(pcie); in mtk_pcie_setup()
1141 reset_control_bulk_deassert(pcie->soc->phy_resets.num_resets, pcie->phy_resets); in mtk_pcie_setup()
1144 err = pcie->soc->power_up(pcie); in mtk_pcie_setup()
1148 err = of_pci_get_max_link_speed(pcie->dev->of_node); in mtk_pcie_setup()
1151 max_speed = mtk_pcie_get_controller_max_link_speed(pcie); in mtk_pcie_setup()
1155 pcie->max_link_speed = err; in mtk_pcie_setup()
1156 dev_info(pcie->dev, in mtk_pcie_setup()
1158 max_speed, pcie->max_link_speed); in mtk_pcie_setup()
1163 err = mtk_pcie_startup_port(pcie); in mtk_pcie_setup()
1167 err = mtk_pcie_setup_irq(pcie); in mtk_pcie_setup()
1174 mtk_pcie_power_down(pcie); in mtk_pcie_setup()
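mtk_pcie_setup() reads the generation the controller supports (PCIE_BASE_CFG_REG) and the optional max-link-speed property from the device tree, and the dev_info() fragment reports both when a restriction is applied. One plausible reading of those fragments is "apply the DT limit only when the hardware can actually reach it"; the sketch below shows that clamp decision with made-up helper names and values.

```c
#include <stdio.h>

/* Hypothetical helper: top PCIe generation reported by the silicon. */
static int controller_max_gen(void) { return 3; }

/* Hypothetical helper: optional "max-link-speed" DT property, 0 if absent. */
static int dt_max_gen(void)         { return 2; }

int main(void)
{
	int hw = controller_max_gen();
	int dt = dt_max_gen();
	int max_link_speed = 0;        /* 0 means "no software restriction" */

	/* Apply the DT restriction only when it is set and within hardware reach. */
	if (dt > 0 && dt <= hw)
		max_link_speed = dt;

	printf("controller supports Gen%d, restricting link to Gen%d\n",
	       hw, max_link_speed ? max_link_speed : hw);
	return 0;
}
```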
1182 struct mtk_gen3_pcie *pcie; in mtk_pcie_probe() local
1186 host = devm_pci_alloc_host_bridge(dev, sizeof(*pcie)); in mtk_pcie_probe()
1190 pcie = pci_host_bridge_priv(host); in mtk_pcie_probe()
1192 pcie->dev = dev; in mtk_pcie_probe()
1193 pcie->soc = device_get_match_data(dev); in mtk_pcie_probe()
1194 platform_set_drvdata(pdev, pcie); in mtk_pcie_probe()
1196 err = mtk_pcie_setup(pcie); in mtk_pcie_probe()
1201 host->sysdata = pcie; in mtk_pcie_probe()
1205 mtk_pcie_irq_teardown(pcie); in mtk_pcie_probe()
1206 mtk_pcie_power_down(pcie); in mtk_pcie_probe()
1215 struct mtk_gen3_pcie *pcie = platform_get_drvdata(pdev); in mtk_pcie_remove() local
1216 struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie); in mtk_pcie_remove()
1223 mtk_pcie_irq_teardown(pcie); in mtk_pcie_remove()
1224 mtk_pcie_power_down(pcie); in mtk_pcie_remove()
1227 static void mtk_pcie_irq_save(struct mtk_gen3_pcie *pcie) in mtk_pcie_irq_save() argument
1231 raw_spin_lock(&pcie->irq_lock); in mtk_pcie_irq_save()
1233 pcie->saved_irq_state = readl_relaxed(pcie->base + PCIE_INT_ENABLE_REG); in mtk_pcie_irq_save()
1236 struct mtk_msi_set *msi_set = &pcie->msi_sets[i]; in mtk_pcie_irq_save()
1242 raw_spin_unlock(&pcie->irq_lock); in mtk_pcie_irq_save()
1245 static void mtk_pcie_irq_restore(struct mtk_gen3_pcie *pcie) in mtk_pcie_irq_restore() argument
1249 raw_spin_lock(&pcie->irq_lock); in mtk_pcie_irq_restore()
1251 writel_relaxed(pcie->saved_irq_state, pcie->base + PCIE_INT_ENABLE_REG); in mtk_pcie_irq_restore()
1254 struct mtk_msi_set *msi_set = &pcie->msi_sets[i]; in mtk_pcie_irq_restore()
1260 raw_spin_unlock(&pcie->irq_lock); in mtk_pcie_irq_restore()
1263 static int mtk_pcie_turn_off_link(struct mtk_gen3_pcie *pcie) in mtk_pcie_turn_off_link() argument
1267 val = readl_relaxed(pcie->base + PCIE_ICMD_PM_REG); in mtk_pcie_turn_off_link()
1269 writel_relaxed(val, pcie->base + PCIE_ICMD_PM_REG); in mtk_pcie_turn_off_link()
1272 return readl_poll_timeout(pcie->base + PCIE_LTSSM_STATUS_REG, val, in mtk_pcie_turn_off_link()
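mtk_pcie_turn_off_link() sets a turn-off bit in PCIE_ICMD_PM_REG and then polls PCIE_LTSSM_STATUS_REG until the link settles in the L2 idle state, using readl_poll_timeout(). A userspace approximation of that poll-with-timeout pattern; the LTSSM state encoding, the timeout, and the fake register model are assumptions.

```c
#include <stdio.h>
#include <stdint.h>
#include <time.h>

#define LTSSM_L2_IDLE   0x14      /* placeholder encoding for the L2 idle state */
#define POLL_TIMEOUT_US 50000

/* Fake LTSSM register that eventually reaches L2 idle. */
static uint32_t read_ltssm(void)
{
	static int calls;

	return (++calls > 3) ? LTSSM_L2_IDLE : 0x0d;
}

/* Poll until the condition holds or the timeout expires, like readl_poll_timeout(). */
static int poll_for_l2(void)
{
	struct timespec start, now;

	clock_gettime(CLOCK_MONOTONIC, &start);
	for (;;) {
		if (read_ltssm() == LTSSM_L2_IDLE)
			return 0;

		clock_gettime(CLOCK_MONOTONIC, &now);
		if ((now.tv_sec - start.tv_sec) * 1000000L +
		    (now.tv_nsec - start.tv_nsec) / 1000L > POLL_TIMEOUT_US)
			return -1;   /* -ETIMEDOUT in the kernel version */
	}
}

int main(void)
{
	printf("poll_for_l2() = %d\n", poll_for_l2());
	return 0;
}
```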
1280 struct mtk_gen3_pcie *pcie = dev_get_drvdata(dev); in mtk_pcie_suspend_noirq() local
1285 err = mtk_pcie_turn_off_link(pcie); in mtk_pcie_suspend_noirq()
1287 dev_err(pcie->dev, "cannot enter L2 state\n"); in mtk_pcie_suspend_noirq()
1291 if (!(pcie->soc->flags & SKIP_PCIE_RSTB)) { in mtk_pcie_suspend_noirq()
1293 val = readl_relaxed(pcie->base + PCIE_RST_CTRL_REG); in mtk_pcie_suspend_noirq()
1295 writel_relaxed(val, pcie->base + PCIE_RST_CTRL_REG); in mtk_pcie_suspend_noirq()
1298 dev_dbg(pcie->dev, "entered L2 states successfully"); in mtk_pcie_suspend_noirq()
1300 mtk_pcie_irq_save(pcie); in mtk_pcie_suspend_noirq()
1301 mtk_pcie_power_down(pcie); in mtk_pcie_suspend_noirq()
1308 struct mtk_gen3_pcie *pcie = dev_get_drvdata(dev); in mtk_pcie_resume_noirq() local
1311 err = pcie->soc->power_up(pcie); in mtk_pcie_resume_noirq()
1315 err = mtk_pcie_startup_port(pcie); in mtk_pcie_resume_noirq()
1317 mtk_pcie_power_down(pcie); in mtk_pcie_resume_noirq()
1321 mtk_pcie_irq_restore(pcie); in mtk_pcie_resume_noirq()
1351 { .compatible = "airoha,en7581-pcie", .data = &mtk_pcie_soc_en7581 },
1352 { .compatible = "mediatek,mt8192-pcie", .data = &mtk_pcie_soc_mt8192 },
1361 .name = "mtk-pcie-gen3",
1369 MODULE_DESCRIPTION("MediaTek Gen3 PCIe host controller driver");