Lines Matching +full:modem +full:- +full:init

All hits below are from the Linux kernel's MediaTek T7xx WWAN driver, drivers/net/wwan/t7xx/t7xx_modem_ops.c. Each entry shows the source line number, the matching source line, and the enclosing function.

1 // SPDX-License-Identifier: GPL-2.0-only
4 * Copyright (c) 2021-2022, Intel Corporation.
48 /* Modem feature query identification code - "ICCC" */
72 * t7xx_pci_mhccif_isr() - Process MHCCIF interrupts.
73 * @t7xx_dev: MTK device.
74 *
75 * Check the interrupt status and queue commands accordingly.
76 *
77 * Returns:
78 ** 0 - Success.
79 ** -EINVAL - Failure to get FSM control.
83 struct t7xx_modem *md = t7xx_dev->md; in t7xx_pci_mhccif_isr()
89 ctl = md->fsm_ctl; in t7xx_pci_mhccif_isr()
91 dev_err_ratelimited(&t7xx_dev->pdev->dev, in t7xx_pci_mhccif_isr()
93 return -EINVAL; in t7xx_pci_mhccif_isr()
96 spin_lock_bh(&md->exp_lock); in t7xx_pci_mhccif_isr()
98 md->exp_id |= int_sta; in t7xx_pci_mhccif_isr()
99 if (md->exp_id & D2H_INT_EXCEPTION_INIT) { in t7xx_pci_mhccif_isr()
100 if (ctl->md_state == MD_STATE_INVALID || in t7xx_pci_mhccif_isr()
101 ctl->md_state == MD_STATE_WAITING_FOR_HS1 || in t7xx_pci_mhccif_isr()
102 ctl->md_state == MD_STATE_WAITING_FOR_HS2 || in t7xx_pci_mhccif_isr()
103 ctl->md_state == MD_STATE_READY) { in t7xx_pci_mhccif_isr()
104 md->exp_id &= ~D2H_INT_EXCEPTION_INIT; in t7xx_pci_mhccif_isr()
107 } else if (md->exp_id & D2H_INT_PORT_ENUM) { in t7xx_pci_mhccif_isr()
108 md->exp_id &= ~D2H_INT_PORT_ENUM; in t7xx_pci_mhccif_isr()
110 if (ctl->curr_state == FSM_STATE_INIT || ctl->curr_state == FSM_STATE_PRE_START || in t7xx_pci_mhccif_isr()
111 ctl->curr_state == FSM_STATE_STOPPED) in t7xx_pci_mhccif_isr()
113 } else if (ctl->md_state == MD_STATE_WAITING_FOR_HS1) { in t7xx_pci_mhccif_isr()
115 if ((md->exp_id & D2H_INT_ASYNC_MD_HK) && !(mask & D2H_INT_ASYNC_MD_HK)) { in t7xx_pci_mhccif_isr()
116 md->exp_id &= ~D2H_INT_ASYNC_MD_HK; in t7xx_pci_mhccif_isr()
117 queue_work(md->handshake_wq, &md->handshake_work); in t7xx_pci_mhccif_isr()
120 spin_unlock_bh(&md->exp_lock); in t7xx_pci_mhccif_isr()
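
The ISR above takes exp_lock, folds the freshly read status bits into md->exp_id, and then consumes whichever bits it can act on in the current state. Below is a minimal userspace model of that accumulate-and-dispatch pattern; the D2H_* names and exp_id mirror the listing, but the bit values, the stub types, and the mutex standing in for spin_lock_bh() are illustrative assumptions:

    #include <pthread.h>
    #include <stdint.h>
    #include <stdio.h>

    #define D2H_INT_EXCEPTION_INIT  (1u << 0)  /* bit positions are made up */
    #define D2H_INT_ASYNC_MD_HK     (1u << 2)

    struct modem {
        pthread_mutex_t exp_lock;  /* stands in for spin_lock_bh(&md->exp_lock) */
        uint32_t exp_id;           /* accumulated D2H interrupt bits */
    };

    /* Fold new status bits in, then consume the ones we can act on now. */
    static void mhccif_isr_model(struct modem *md, uint32_t int_sta, uint32_t mask)
    {
        pthread_mutex_lock(&md->exp_lock);
        md->exp_id |= int_sta;
        if (md->exp_id & D2H_INT_EXCEPTION_INIT) {
            md->exp_id &= ~D2H_INT_EXCEPTION_INIT;
            puts("exception: notify the FSM");
        } else if ((md->exp_id & D2H_INT_ASYNC_MD_HK) && !(mask & D2H_INT_ASYNC_MD_HK)) {
            md->exp_id &= ~D2H_INT_ASYNC_MD_HK;
            puts("handshake: queue handshake work");
        }
        pthread_mutex_unlock(&md->exp_lock);
    }

    int main(void)
    {
        struct modem md = { .exp_lock = PTHREAD_MUTEX_INITIALIZER };

        mhccif_isr_model(&md, D2H_INT_ASYNC_MD_HK, 0);
        return 0;
    }
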
127 struct t7xx_addr_base *pbase_addr = &t7xx_dev->base_addr; in t7xx_clr_device_irq_via_pcie()
131 reset_pcie_reg = pbase_addr->pcie_ext_reg_base + TOPRGU_CH_PCIE_IRQ_STA - in t7xx_clr_device_irq_via_pcie()
132 pbase_addr->pcie_dev_reg_trsl_addr; in t7xx_clr_device_irq_via_pcie()
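
The arithmetic at lines 131-132 translates a register address from the device's own address map into the host's mapped window: take the host base of the extended register space and add the device address minus the window's device-side start (pcie_dev_reg_trsl_addr). A small sketch of that translation; the constant values and any names not in the listing are made up:

    #include <stdint.h>
    #include <stdio.h>

    #define TOPRGU_CH_PCIE_IRQ_STA  0x1000005cu  /* illustrative device-view address */

    /* Host-side mapping info, loosely modeled on t7xx_addr_base. */
    struct addr_base {
        uintptr_t pcie_ext_reg_base;       /* host base of the mapped window */
        uint32_t  pcie_dev_reg_trsl_addr;  /* device address the window starts at */
    };

    /* device address -> host address: base + (dev_addr - window start) */
    static uintptr_t devaddr_to_host(const struct addr_base *b, uint32_t dev_addr)
    {
        return b->pcie_ext_reg_base + dev_addr - b->pcie_dev_reg_trsl_addr;
    }

    int main(void)
    {
        const struct addr_base base = {
            .pcie_ext_reg_base = 0xd0000000u,      /* pretend ioremap() result */
            .pcie_dev_reg_trsl_addr = 0x10000000u,
        };

        printf("reset_pcie_reg = %#lx\n",
               (unsigned long)devaddr_to_host(&base, TOPRGU_CH_PCIE_IRQ_STA));
        return 0;
    }
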
149 struct device *dev = &t7xx_dev->pdev->dev; in t7xx_acpi_reset()
156 return -EFAULT; in t7xx_acpi_reset()
161 return -EFAULT; in t7xx_acpi_reset()
167 return -EFAULT; in t7xx_acpi_reset()
172 struct device *dev = &t7xx_dev->pdev->dev; in t7xx_acpi_reset()
175 ret = pci_reset_function(t7xx_dev->pdev); in t7xx_acpi_reset()
198 pci_save_state(t7xx_dev->pdev); in t7xx_reset_device()
201 WRITE_ONCE(t7xx_dev->debug_ports_show, false); in t7xx_reset_device()
213 pci_restore_state(t7xx_dev->pdev); in t7xx_reset_device()
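
t7xx_reset_device() brackets the reset with pci_save_state()/pci_restore_state() so config space survives, and drops debug state with WRITE_ONCE() before resetting. A compile-able sketch of that bracket with the PCI core calls stubbed out (the stub names are placeholders):

    #include <stdbool.h>
    #include <stdio.h>

    /* Stubs standing in for the PCI core; only the call order matters here. */
    static void pci_save_state_stub(void)     { puts("save config space"); }
    static int  pci_reset_function_stub(void) { puts("reset function"); return 0; }
    static void pci_restore_state_stub(void)  { puts("restore config space"); }

    static volatile bool debug_ports_show = true;

    static int reset_device_model(void)
    {
        int ret;

        pci_save_state_stub();
        debug_ports_show = false;     /* models WRITE_ONCE(..., false) */
        ret = pci_reset_function_stub();
        pci_restore_state_stub();     /* config space survives the reset */
        return ret;
    }

    int main(void)
    {
        return reset_device_model();
    }
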
243 struct t7xx_modem *modem; in t7xx_rgu_isr_handler() local
246 if (!t7xx_dev->rgu_pci_irq_en) in t7xx_rgu_isr_handler()
249 modem = t7xx_dev->md; in t7xx_rgu_isr_handler()
250 modem->rgu_irq_asserted = true; in t7xx_rgu_isr_handler()
261 t7xx_dev->intr_handler[SAP_RGU_INT] = t7xx_rgu_isr_handler; in t7xx_pcie_register_rgu_isr()
262 t7xx_dev->intr_thread[SAP_RGU_INT] = t7xx_rgu_isr_thread; in t7xx_pcie_register_rgu_isr()
263 t7xx_dev->callback_param[SAP_RGU_INT] = t7xx_dev; in t7xx_pcie_register_rgu_isr()
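
Registration is table-driven: the SAP_RGU_INT slot of three parallel arrays receives the top-half handler, the threaded handler, and the callback argument, and a shared dispatcher later indexes the same slot. A sketch of the scheme; the array names mirror the listing, everything else is illustrative:

    #include <stdio.h>

    enum { SAP_RGU_INT = 0, NR_INT };

    typedef int (*irq_fn)(void *param);

    struct pci_dev_model {
        irq_fn intr_handler[NR_INT];    /* top half, runs in IRQ context */
        irq_fn intr_thread[NR_INT];     /* threaded bottom half */
        void  *callback_param[NR_INT];
    };

    static int rgu_isr_handler(void *param) { (void)param; puts("RGU top half"); return 0; }
    static int rgu_isr_thread(void *param)  { (void)param; puts("RGU thread");   return 0; }

    int main(void)
    {
        struct pci_dev_model dev = { 0 };

        dev.intr_handler[SAP_RGU_INT] = rgu_isr_handler;
        dev.intr_thread[SAP_RGU_INT] = rgu_isr_thread;
        dev.callback_param[SAP_RGU_INT] = &dev;

        /* A shared dispatcher indexes the same slot for both halves: */
        dev.intr_handler[SAP_RGU_INT](dev.callback_param[SAP_RGU_INT]);
        dev.intr_thread[SAP_RGU_INT](dev.callback_param[SAP_RGU_INT]);
        return 0;
    }
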
268 * t7xx_cldma_exception() - CLDMA exception handler.
269 * @md_ctrl: modem control struct.
270 * @stage: exception stage.
272 * Part of the modem exception recovery.
274 * HIF_EX_INIT: Disable and clear TXQ.
275 * HIF_EX_CLEARQ_DONE: Disable RX, flush TX/RX workqueues and clear RX.
276 * HIF_EX_ALLQ_RESET: HW is back in safe mode for re-initialization and restart.
279 /* Modem Exception Handshake Flow
280  *
281  * Modem HW Exception interrupt received
282  *            (MD_IRQ_CCIF_EX)
283  *                    |
284  *          +---------v--------+
285  *          |   HIF_EX_INIT    | : Disable and clear CLDMA IRQs
286  *          +------------------+
287  *                    |
288  *          +---------v--------+
289  *          | HIF_EX_INIT_DONE | : Wait for the init to be done
290  *          +------------------+
291  *                    |
292  *          +---------v--------+
293  *          |HIF_EX_CLEARQ_DONE| : Disable and clear CLDMA TX/RX queues
294  *          +------------------+ : Flush TX/RX workqueues
295  *                    |
296  *          +---------v--------+
297  *          |HIF_EX_ALLQ_RESET | : Restore the CLDMA configuration
298  *          +------------------+
299  */
315 if (md_ctrl->hif_id == CLDMA_ID_MD) in t7xx_cldma_exception()
316 t7xx_cldma_hw_reset(md_ctrl->t7xx_dev->base_addr.infracfg_ao_base); in t7xx_cldma_exception()
322 t7xx_cldma_hw_init(&md_ctrl->hw_info); in t7xx_cldma_exception()
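
t7xx_cldma_exception() runs once per stage of the flow charted above, so its body is essentially a switch on the stage, with the CLDMA_ID_MD-only hardware reset at lines 315-316 falling in the clear-queues stage and the hw_init at line 322 in the final restart stage. A sketch of that staged structure; the stage names mirror the listing, the per-stage bodies are placeholders:

    #include <stdio.h>

    enum hif_ex_stage {           /* mirrors the handshake flow charted above */
        HIF_EX_INIT,              /* disable and clear CLDMA IRQs / TXQ */
        HIF_EX_CLEARQ_DONE,       /* disable RX, flush workqueues, clear RX */
        HIF_EX_ALLQ_RESET,        /* HW back in safe mode: re-init and restart */
    };

    static void cldma_exception_model(enum hif_ex_stage stage)
    {
        switch (stage) {
        case HIF_EX_INIT:
            puts("stop TX, clear TX queues");
            break;
        case HIF_EX_CLEARQ_DONE:
            puts("stop RX, flush workqueues, clear RX queues, MD-only HW reset");
            break;
        case HIF_EX_ALLQ_RESET:
            puts("re-init HW, restart queues");
            break;
        }
    }

    int main(void)
    {
        /* The FSM invokes one stage at a time as the device signals progress. */
        cldma_exception_model(HIF_EX_INIT);
        cldma_exception_model(HIF_EX_CLEARQ_DONE);
        cldma_exception_model(HIF_EX_ALLQ_RESET);
        return 0;
    }
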
333 struct t7xx_pci_dev *t7xx_dev = md->t7xx_dev; in t7xx_md_exception()
338 t7xx_port_proxy_reset(md->port_prox); in t7xx_md_exception()
341 t7xx_cldma_exception(md->md_ctrl[CLDMA_ID_MD], stage); in t7xx_md_exception()
342 t7xx_cldma_exception(md->md_ctrl[CLDMA_ID_AP], stage); in t7xx_md_exception()
355 if (md->exp_id & event_id) in t7xx_wait_hif_ex_hk_event()
362 return -EFAULT; in t7xx_wait_hif_ex_hk_event()
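
t7xx_wait_hif_ex_hk_event() waits for an event bit to appear in md->exp_id and fails with -EFAULT if it never does. A userspace sketch of that poll-until-deadline shape; the 10 ms interval and 300 ms budget are invented for the example:

    #include <errno.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>

    /* Poll *flags for event_id every 10 ms, up to ~300 ms; -EFAULT on timeout. */
    static int wait_event_bit(const volatile uint32_t *flags, uint32_t event_id)
    {
        for (int tries = 0; tries < 30; tries++) {
            if (*flags & event_id)
                return 0;
            usleep(10 * 1000);
        }
        return -EFAULT;
    }

    int main(void)
    {
        volatile uint32_t exp_id = 0x2;  /* pretend the ISR already set bit 1 */

        printf("present bit: %d\n", wait_event_bit(&exp_id, 0x2));  /* 0 */
        printf("missing bit: %d\n", wait_event_bit(&exp_id, 0x4));  /* -EFAULT */
        return 0;
    }
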
374 t7xx_dev->rgu_pci_irq_en = true; in t7xx_md_sys_sw_init()
394 ft_query->head_pattern = cpu_to_le32(MD_FEATURE_QUERY_ID); in t7xx_prepare_host_rt_data_query()
395 memcpy(ft_query->feature_set, core->feature_set, FEATURE_COUNT); in t7xx_prepare_host_rt_data_query()
396 ft_query->tail_pattern = cpu_to_le32(MD_FEATURE_QUERY_ID); in t7xx_prepare_host_rt_data_query()
399 t7xx_port_send_ctl_skb(core->ctl_port, skb, CTL_ID_HS1_MSG, 0); in t7xx_prepare_host_rt_data_query()
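
The HS1 query is framed by the same 32-bit pattern (MD_FEATURE_QUERY_ID, "ICCC") at head and tail, with the host's feature bytes in between, then sent as CTL_ID_HS1_MSG. A sketch of building that frame into a plain struct; the layout is inferred from the listing and the FEATURE_COUNT value here is arbitrary:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define MD_FEATURE_QUERY_ID 0x49434343u  /* "ICCC", per the comment at line 48 */
    #define FEATURE_COUNT 64                 /* the real count is not in the listing */

    /* Layout inferred from t7xx_prepare_host_rt_data_query(). */
    struct md_query {
        uint32_t head_pattern;               /* cpu_to_le32() on the wire */
        uint8_t  feature_set[FEATURE_COUNT];
        uint32_t tail_pattern;
    } __attribute__((packed));

    static void build_query(struct md_query *q, const uint8_t *features)
    {
        q->head_pattern = MD_FEATURE_QUERY_ID;
        memcpy(q->feature_set, features, FEATURE_COUNT);
        q->tail_pattern = MD_FEATURE_QUERY_ID;
    }

    int main(void)
    {
        uint8_t features[FEATURE_COUNT] = { 0 };
        struct md_query q;

        build_query(&q, features);
        printf("head %#x tail %#x\n", q.head_pattern, q.tail_pattern);
        return 0;
    }
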
411 if (le32_to_cpu(md_feature->head_pattern) != MD_FEATURE_QUERY_ID || in t7xx_prepare_device_rt_data()
412 le32_to_cpu(md_feature->tail_pattern) != MD_FEATURE_QUERY_ID) { in t7xx_prepare_device_rt_data()
414 le32_to_cpu(md_feature->head_pattern), in t7xx_prepare_device_rt_data()
415 le32_to_cpu(md_feature->tail_pattern)); in t7xx_prepare_device_rt_data()
416 return -EINVAL; in t7xx_prepare_device_rt_data()
420 if (FIELD_GET(FEATURE_MSK, md_feature->feature_set[i]) != in t7xx_prepare_device_rt_data()
427 return -ENOMEM; in t7xx_prepare_device_rt_data()
434 u8 md_feature_mask = FIELD_GET(FEATURE_MSK, md_feature->feature_set[i]); in t7xx_prepare_device_rt_data()
439 rt_feature->feature_id = i; in t7xx_prepare_device_rt_data()
441 rt_feature->support_info = md_feature->feature_set[i]; in t7xx_prepare_device_rt_data()
447 t7xx_port_send_ctl_skb(core->ctl_port, skb, CTL_ID_HS3_MSG, 0); in t7xx_prepare_device_rt_data()
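
On the HS2 side, after validating the head and tail patterns, each feature_set byte carries a small bitfield that the driver extracts with FIELD_GET(FEATURE_MSK, ...) to decide whether to echo support back in the HS3 reply. A sketch of that mask handling; the mask width and the enum values are assumptions, not the driver's actual definitions:

    #include <stdint.h>
    #include <stdio.h>

    #define FEATURE_MSK 0x0fu  /* assumed field width; the driver defines its own */

    enum feature_support {             /* illustrative values */
        FEAT_DOES_NOT_EXIST = 0,
        FEAT_NOT_SUPPORTED  = 1,
        FEAT_MUST_SUPPORT   = 2,
    };

    /* FIELD_GET(FEATURE_MSK, byte) without the kernel helper (mask is low bits). */
    static unsigned int feature_get(uint8_t byte)
    {
        return byte & FEATURE_MSK;
    }

    int main(void)
    {
        const uint8_t feature_set[4] = { 0, 1, 2, 2 };

        for (int i = 0; i < 4; i++)
            if (feature_get(feature_set[i]) == FEAT_MUST_SUPPORT)
                printf("feature %d: echo support in the HS3 reply\n", i);
        return 0;
    }
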
461 offset += sizeof(*rt_feature) + le32_to_cpu(rt_feature->data_len); in t7xx_parse_host_rt_data()
463 ft_spt_cfg = FIELD_GET(FEATURE_MSK, core->feature_set[i]); in t7xx_parse_host_rt_data()
467 ft_spt_st = FIELD_GET(FEATURE_MSK, rt_feature->support_info); in t7xx_parse_host_rt_data()
469 return -EINVAL; in t7xx_parse_host_rt_data()
472 t7xx_port_enum_msg_handler(ctl->md, rt_feature->data); in t7xx_parse_host_rt_data()
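
t7xx_parse_host_rt_data() walks a packed array of variable-length records, advancing the cursor by sizeof(header) + data_len at each step, exactly the idiom at line 461. A sketch of that walk over a byte buffer; the record layout is an assumption modeled on the listing:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Variable-length record: fixed header followed by data_len payload bytes. */
    struct rt_feature {
        uint8_t  feature_id;
        uint8_t  support_info;
        uint16_t reserved;
        uint32_t data_len;   /* le32_to_cpu() in the driver */
    } __attribute__((packed));

    static void parse(const uint8_t *buf, size_t len)
    {
        size_t offset = 0;

        while (offset + sizeof(struct rt_feature) <= len) {
            struct rt_feature ft;

            memcpy(&ft, buf + offset, sizeof(ft));
            printf("feature %u, %u data bytes\n",
                   (unsigned)ft.feature_id, (unsigned)ft.data_len);

            /* advance past header and payload, as at line 461 */
            offset += sizeof(ft) + ft.data_len;
        }
    }

    int main(void)
    {
        uint8_t buf[2 * sizeof(struct rt_feature) + 4] = { 0 };
        struct rt_feature a = { .feature_id = 1, .data_len = 4 };
        struct rt_feature b = { .feature_id = 2, .data_len = 0 };

        memcpy(buf, &a, sizeof(a));
        memcpy(buf + sizeof(a) + 4, &b, sizeof(b));
        parse(buf, sizeof(buf));
        return 0;
    }
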
480 struct device *dev = &md->t7xx_dev->pdev->dev; in t7xx_core_reset()
481 struct t7xx_fsm_ctl *ctl = md->fsm_ctl; in t7xx_core_reset()
483 md->core_md.ready = false; in t7xx_core_reset()
487 return -EINVAL; in t7xx_core_reset()
490 if (md->core_md.handshake_ongoing) { in t7xx_core_reset()
497 md->core_md.handshake_ongoing = false; in t7xx_core_reset()
507 struct device *dev = &md->t7xx_dev->pdev->dev; in t7xx_core_hk_handler()
516 spin_lock_irqsave(&ctl->event_lock, flags); in t7xx_core_hk_handler()
517 list_for_each_entry_safe(event, event_next, &ctl->event_queue, entry) { in t7xx_core_hk_handler()
518 if (event->event_id == err_detect) { in t7xx_core_hk_handler()
519 list_del(&event->entry); in t7xx_core_hk_handler()
520 spin_unlock_irqrestore(&ctl->event_lock, flags); in t7xx_core_hk_handler()
523 } else if (event->event_id == event_id) { in t7xx_core_hk_handler()
524 list_del(&event->entry); in t7xx_core_hk_handler()
529 spin_unlock_irqrestore(&ctl->event_lock, flags); in t7xx_core_hk_handler()
534 wait_event_interruptible(ctl->event_wq, !list_empty(&ctl->event_queue) || in t7xx_core_hk_handler()
540 if (!event || ctl->exp_flg) in t7xx_core_hk_handler()
543 ret = t7xx_parse_host_rt_data(ctl, core_info, dev, event->data, event->length); in t7xx_core_hk_handler()
549 if (ctl->exp_flg) in t7xx_core_hk_handler()
552 ret = t7xx_prepare_device_rt_data(core_info, dev, event->data); in t7xx_core_hk_handler()
558 core_info->ready = true; in t7xx_core_hk_handler()
559 core_info->handshake_ongoing = false; in t7xx_core_hk_handler()
560 wake_up(&ctl->async_hk_wq); in t7xx_core_hk_handler()
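
The handshake handler sleeps on ctl->event_wq until the expected event (or the error event) appears in the queue, scanning the list under event_lock and bailing out early when exp_flg is set. A condensed userspace model using a mutex and condition variable in place of the kernel wait queue; a single integer stands in for the event list and all names are stand-ins:

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  wq   = PTHREAD_COND_INITIALIZER;
    static int pending_event;      /* 0 = none; stands in for the event list */
    static bool exp_flg;           /* exception seen: abort the handshake */

    /* Wait for want_id; give up if an exception is flagged instead. */
    static int wait_for_event(int want_id)
    {
        int ret = -1;

        pthread_mutex_lock(&lock);
        while (!pending_event && !exp_flg)
            pthread_cond_wait(&wq, &lock);   /* wait_event_interruptible() */
        if (!exp_flg && pending_event == want_id) {
            pending_event = 0;               /* list_del(&event->entry) */
            ret = 0;
        }
        pthread_mutex_unlock(&lock);
        return ret;
    }

    static void post_event(int id)
    {
        pthread_mutex_lock(&lock);
        pending_event = id;
        pthread_cond_signal(&wq);
        pthread_mutex_unlock(&lock);
    }

    int main(void)
    {
        post_event(2);                            /* pretend the ISR queued HS2 */
        printf("wait -> %d\n", wait_for_event(2));
        return 0;
    }
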
568 struct t7xx_fsm_ctl *ctl = md->fsm_ctl; in t7xx_md_hk_wq()
572 t7xx_cldma_switch_cfg(md->md_ctrl[CLDMA_ID_MD], CLDMA_SHARED_Q_CFG); in t7xx_md_hk_wq()
573 t7xx_cldma_start(md->md_ctrl[CLDMA_ID_MD]); in t7xx_md_hk_wq()
575 md->core_md.handshake_ongoing = true; in t7xx_md_hk_wq()
576 t7xx_core_hk_handler(md, &md->core_md, ctl, FSM_EVENT_MD_HS2, FSM_EVENT_MD_HS2_EXIT); in t7xx_md_hk_wq()
582 struct t7xx_fsm_ctl *ctl = md->fsm_ctl; in t7xx_ap_hk_wq()
586 t7xx_cldma_stop(md->md_ctrl[CLDMA_ID_AP]); in t7xx_ap_hk_wq()
587 t7xx_cldma_switch_cfg(md->md_ctrl[CLDMA_ID_AP], CLDMA_SHARED_Q_CFG); in t7xx_ap_hk_wq()
588 t7xx_cldma_start(md->md_ctrl[CLDMA_ID_AP]); in t7xx_ap_hk_wq()
589 md->core_ap.handshake_ongoing = true; in t7xx_ap_hk_wq()
590 t7xx_core_hk_handler(md, &md->core_ap, ctl, FSM_EVENT_AP_HS2, FSM_EVENT_AP_HS2_EXIT); in t7xx_ap_hk_wq()
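
Both work functions above follow one recipe: put the CLDMA instance into the shared-queue configuration (the AP path stops it first), start it, mark the handshake in flight, and call the common handler with per-core HS2 event IDs. A sketch of that shared shape; the names mirror the listing, the bodies are placeholders:

    #include <stdbool.h>
    #include <stdio.h>

    enum cldma_id { CLDMA_ID_MD, CLDMA_ID_AP };

    struct core_info { bool handshake_ongoing; };

    static void cldma_stop(enum cldma_id id)       { printf("stop %d\n", id); }
    static void cldma_switch_cfg(enum cldma_id id) { printf("cfg %d\n", id); }
    static void cldma_start(enum cldma_id id)      { printf("start %d\n", id); }
    static void core_hk_handler(struct core_info *c, int ev, int ev_exit)
    {
        (void)c;
        printf("handshake events %d/%d\n", ev, ev_exit);
    }

    /* Shared shape of t7xx_md_hk_wq()/t7xx_ap_hk_wq(). */
    static void hk_work(enum cldma_id id, struct core_info *core, bool stop_first,
                        int ev, int ev_exit)
    {
        if (stop_first)
            cldma_stop(id);      /* the AP path stops the instance first */
        cldma_switch_cfg(id);    /* CLDMA_SHARED_Q_CFG in the listing */
        cldma_start(id);
        core->handshake_ongoing = true;
        core_hk_handler(core, ev, ev_exit);
    }

    int main(void)
    {
        struct core_info core_md = { 0 }, core_ap = { 0 };

        hk_work(CLDMA_ID_MD, &core_md, false, 2, 3);  /* FSM_EVENT_MD_HS2[_EXIT] */
        hk_work(CLDMA_ID_AP, &core_ap, true, 4, 5);   /* FSM_EVENT_AP_HS2[_EXIT] */
        return 0;
    }
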
595 struct t7xx_fsm_ctl *ctl = md->fsm_ctl; in t7xx_md_event_notify()
601 t7xx_mhccif_mask_clr(md->t7xx_dev, D2H_INT_PORT_ENUM | D2H_INT_ASYNC_MD_HK | in t7xx_md_event_notify()
606 t7xx_mhccif_mask_set(md->t7xx_dev, D2H_INT_PORT_ENUM); in t7xx_md_event_notify()
608 spin_lock_irqsave(&md->exp_lock, flags); in t7xx_md_event_notify()
609 int_sta = t7xx_get_interrupt_status(md->t7xx_dev); in t7xx_md_event_notify()
610 md->exp_id |= int_sta; in t7xx_md_event_notify()
611 if (md->exp_id & D2H_INT_EXCEPTION_INIT) { in t7xx_md_event_notify()
612 ctl->exp_flg = true; in t7xx_md_event_notify()
613 md->exp_id &= ~D2H_INT_EXCEPTION_INIT; in t7xx_md_event_notify()
614 md->exp_id &= ~D2H_INT_ASYNC_MD_HK; in t7xx_md_event_notify()
615 md->exp_id &= ~D2H_INT_ASYNC_AP_HK; in t7xx_md_event_notify()
616 } else if (ctl->exp_flg) { in t7xx_md_event_notify()
617 md->exp_id &= ~D2H_INT_ASYNC_MD_HK; in t7xx_md_event_notify()
618 md->exp_id &= ~D2H_INT_ASYNC_AP_HK; in t7xx_md_event_notify()
620 void __iomem *mhccif_base = md->t7xx_dev->base_addr.mhccif_rc_base; in t7xx_md_event_notify()
622 if (md->exp_id & D2H_INT_ASYNC_MD_HK) { in t7xx_md_event_notify()
623 queue_work(md->handshake_wq, &md->handshake_work); in t7xx_md_event_notify()
624 md->exp_id &= ~D2H_INT_ASYNC_MD_HK; in t7xx_md_event_notify()
626 t7xx_mhccif_mask_set(md->t7xx_dev, D2H_INT_ASYNC_MD_HK); in t7xx_md_event_notify()
629 if (md->exp_id & D2H_INT_ASYNC_AP_HK) { in t7xx_md_event_notify()
630 queue_work(md->handshake_wq, &md->ap_handshake_work); in t7xx_md_event_notify()
631 md->exp_id &= ~D2H_INT_ASYNC_AP_HK; in t7xx_md_event_notify()
633 t7xx_mhccif_mask_set(md->t7xx_dev, D2H_INT_ASYNC_AP_HK); in t7xx_md_event_notify()
636 spin_unlock_irqrestore(&md->exp_lock, flags); in t7xx_md_event_notify()
638 t7xx_mhccif_mask_clr(md->t7xx_dev, in t7xx_md_event_notify()
646 t7xx_mhccif_mask_set(md->t7xx_dev, D2H_INT_ASYNC_MD_HK); in t7xx_md_event_notify()
647 t7xx_mhccif_mask_set(md->t7xx_dev, D2H_INT_ASYNC_AP_HK); in t7xx_md_event_notify()
657 struct device *dev = &md->t7xx_dev->pdev->dev; in t7xx_md_exception_handshake()
680 struct device *dev = &t7xx_dev->pdev->dev; in t7xx_md_alloc()
687 md->t7xx_dev = t7xx_dev; in t7xx_md_alloc()
688 t7xx_dev->md = md; in t7xx_md_alloc()
689 spin_lock_init(&md->exp_lock); in t7xx_md_alloc()
690 md->handshake_wq = alloc_workqueue("%s", WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_HIGHPRI, in t7xx_md_alloc()
692 if (!md->handshake_wq) in t7xx_md_alloc()
695 INIT_WORK(&md->handshake_work, t7xx_md_hk_wq); in t7xx_md_alloc()
696 md->core_md.feature_set[RT_ID_MD_PORT_ENUM] &= ~FEATURE_MSK; in t7xx_md_alloc()
697 md->core_md.feature_set[RT_ID_MD_PORT_ENUM] |= in t7xx_md_alloc()
700 INIT_WORK(&md->ap_handshake_work, t7xx_ap_hk_wq); in t7xx_md_alloc()
701 md->core_ap.feature_set[RT_ID_AP_PORT_ENUM] &= ~FEATURE_MSK; in t7xx_md_alloc()
702 md->core_ap.feature_set[RT_ID_AP_PORT_ENUM] |= in t7xx_md_alloc()
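
t7xx_md_alloc() creates a dedicated WQ_HIGHPRI workqueue, binds both work items with INIT_WORK(), and seeds each core's PORT_ENUM feature byte by clearing FEATURE_MSK and OR-ing in a FIELD_PREP'd value. A sketch of that clear-then-set bitfield update; the mask and the "must be supported" value are assumptions:

    #include <stdint.h>
    #include <stdio.h>

    #define FEATURE_MSK 0x0fu                 /* assumed low-nibble mask */
    #define MTK_FEATURE_MUST_BE_SUPPORTED 2u  /* illustrative value */

    /* FIELD_PREP(FEATURE_MSK, val): shift val into the mask's position. */
    static uint8_t field_prep(uint8_t mask, uint8_t val)
    {
        return (uint8_t)(val * (mask & -mask)) & mask;  /* mul by lowest set bit */
    }

    int main(void)
    {
        uint8_t feature_set = 0xff;          /* whatever was there before */

        feature_set &= ~FEATURE_MSK;         /* clear the field */
        feature_set |= field_prep(FEATURE_MSK, MTK_FEATURE_MUST_BE_SUPPORTED);
        printf("feature_set = %#x\n", feature_set);  /* 0xf2 */
        return 0;
    }
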
710 struct t7xx_modem *md = t7xx_dev->md; in t7xx_md_reset()
712 md->md_init_finish = false; in t7xx_md_reset()
713 md->exp_id = 0; in t7xx_md_reset()
715 t7xx_cldma_reset(md->md_ctrl[CLDMA_ID_MD]); in t7xx_md_reset()
716 t7xx_cldma_reset(md->md_ctrl[CLDMA_ID_AP]); in t7xx_md_reset()
717 t7xx_port_proxy_reset(md->port_prox); in t7xx_md_reset()
718 md->md_init_finish = true; in t7xx_md_reset()
723 * t7xx_md_init() - Initialize modem.
724 * @t7xx_dev: MTK device.
725 *
726 * Allocate and initialize MD control block, and initialize data path.
727 * Register MHCCIF ISR and RGU ISR, and start the state machine.
728 *
729 * Return:
730 ** 0 - Success.
731 ** -ENOMEM - Allocation failure.
740 return -ENOMEM; in t7xx_md_init()
758 ret = t7xx_cldma_init(md->md_ctrl[CLDMA_ID_MD]); in t7xx_md_init()
762 ret = t7xx_cldma_init(md->md_ctrl[CLDMA_ID_AP]); in t7xx_md_init()
770 ret = t7xx_fsm_append_cmd(md->fsm_ctl, FSM_CMD_START, 0); in t7xx_md_init()
775 md->md_init_finish = true; in t7xx_md_init()
779 t7xx_port_proxy_uninit(md->port_prox); in t7xx_md_init()
782 t7xx_cldma_exit(md->md_ctrl[CLDMA_ID_AP]); in t7xx_md_init()
785 t7xx_cldma_exit(md->md_ctrl[CLDMA_ID_MD]); in t7xx_md_init()
794 destroy_workqueue(md->handshake_wq); in t7xx_md_init()
795 dev_err(&t7xx_dev->pdev->dev, "Modem init failed\n"); in t7xx_md_init()
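
The tail of t7xx_md_init() is a kernel-style goto unwind: each error label tears down only what was set up before the failing step, ending at the workqueue and the "Modem init failed" message. A compact model of the ladder with placeholder steps, one of which is forced to fail so the unwind order is visible:

    #include <stdio.h>

    static int setup(const char *what, int ret)
    {
        printf("setup %s -> %d\n", what, ret);
        return ret;
    }

    static void teardown(const char *what)
    {
        printf("undo %s\n", what);
    }

    /* Each err_* label unwinds only the steps that already succeeded. */
    static int md_init_model(void)
    {
        int ret;

        ret = setup("cldma MD", 0);
        if (ret)
            goto err_destroy_wq;
        ret = setup("cldma AP", 0);
        if (ret)
            goto err_uninit_md_cldma;
        ret = setup("port proxy", -1);   /* forced failure to show the unwind */
        if (ret)
            goto err_uninit_ap_cldma;
        return 0;

    err_uninit_ap_cldma:
        teardown("cldma AP");
    err_uninit_md_cldma:
        teardown("cldma MD");
    err_destroy_wq:
        teardown("workqueue");
        fprintf(stderr, "Modem init failed\n");
        return ret;
    }

    int main(void)
    {
        return md_init_model();
    }
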
801 enum t7xx_mode mode = READ_ONCE(t7xx_dev->mode); in t7xx_md_exit()
802 struct t7xx_modem *md = t7xx_dev->md; in t7xx_md_exit()
806 if (!md->md_init_finish) in t7xx_md_exit()
810 t7xx_fsm_append_cmd(md->fsm_ctl, FSM_CMD_PRE_STOP, FSM_CMD_FLAG_WAIT_FOR_COMPLETION); in t7xx_md_exit()
811 t7xx_port_proxy_uninit(md->port_prox); in t7xx_md_exit()
812 t7xx_cldma_exit(md->md_ctrl[CLDMA_ID_AP]); in t7xx_md_exit()
813 t7xx_cldma_exit(md->md_ctrl[CLDMA_ID_MD]); in t7xx_md_exit()
816 destroy_workqueue(md->handshake_wq); in t7xx_md_exit()