/*
 * Copyright 2022-2023 NXP
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <stdbool.h>

#include <arch.h>
#include <arch_helpers.h>
#include <common/debug.h>
#include <drivers/delay_timer.h>
#include <lib/mmio.h>
#include <lib/psci/psci.h>

#include <plat_imx8.h>
#include <pwr_ctrl.h>

#define CORE_PWR_STATE(state) ((state)->pwr_domain_state[MPIDR_AFFLVL0])
#define CLUSTER_PWR_STATE(state) ((state)->pwr_domain_state[MPIDR_AFFLVL1])
#define SYSTEM_PWR_STATE(state) ((state)->pwr_domain_state[PLAT_MAX_PWR_LVL])

/* platform secure warm boot entry */
static uintptr_t secure_entrypoint;

static bool boot_stage = true;

int imx_validate_ns_entrypoint(uintptr_t ns_entrypoint)
{
	/* The non-secure entrypoint should be in RAM space */
	if (ns_entrypoint < PLAT_NS_IMAGE_OFFSET) {
		return PSCI_E_INVALID_PARAMS;
	}

	return PSCI_E_SUCCESS;
}

int imx_validate_power_state(unsigned int power_state,
			 psci_power_state_t *req_state)
{
	int pwr_lvl = psci_get_pstate_pwrlvl(power_state);
	int pwr_type = psci_get_pstate_type(power_state);
	int state_id = psci_get_pstate_id(power_state);

	if (pwr_lvl > PLAT_MAX_PWR_LVL) {
		return PSCI_E_INVALID_PARAMS;
	}

	if (pwr_type == PSTATE_TYPE_STANDBY) {
		CORE_PWR_STATE(req_state) = PLAT_MAX_RET_STATE;
		CLUSTER_PWR_STATE(req_state) = PLAT_MAX_RET_STATE;
	}

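	/*
	 * state_id 0x33 requests a core power down with the cluster kept
	 * in retention, so map it to OFF/RET accordingly.
	 */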
	if (pwr_type == PSTATE_TYPE_POWERDOWN && state_id == 0x33) {
		CORE_PWR_STATE(req_state) = PLAT_MAX_OFF_STATE;
		CLUSTER_PWR_STATE(req_state) = PLAT_MAX_RET_STATE;
	}

	return PSCI_E_SUCCESS;
}

void imx_set_cpu_boot_entry(unsigned int core_id, uint64_t boot_entry)
{
	/* set the cpu core reset entry: BLK_CTRL_S */
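	/* the RVBADDR registers hold the reset vector address shifted right by 2 */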
	mmio_write_32(BLK_CTRL_S_BASE + CA55_RVBADDR0_L + core_id * 8, boot_entry >> 2);
}

int imx_pwr_domain_on(u_register_t mpidr)
{
	unsigned int core_id;

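	/* the core index is encoded in MPIDR affinity level 1 */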
	core_id = MPIDR_AFFLVL1_VAL(mpidr);

	imx_set_cpu_boot_entry(core_id, secure_entrypoint);

	/*
	 * When the core boots up for the first time, it is already ON after the
	 * SoC POR, so 'SW_WAKEUP' cannot be used. Instead, toggle the core's SW
	 * reset and then release the core from cpu_wait.
	 */
	if (boot_stage) {
		/* assert CPU core SW reset */
		mmio_clrbits_32(SRC_SLICE(SRC_A55C0 + core_id) + 0x24, BIT(2) | BIT(0));
		/* deassert CPU core SW reset */
		mmio_setbits_32(SRC_SLICE(SRC_A55C0 + core_id) + 0x24, BIT(2) | BIT(0));
		/* release the cpuwait to kick the cpu */
		mmio_clrbits_32(BLK_CTRL_S_BASE + CA55_CPUWAIT, BIT(core_id));
	} else {
		/* assert the CMC MISC SW WAKEUP BIT to kick the offline core */
		gpc_assert_sw_wakeup(CPU_A55C0 + core_id);
	}

	return PSCI_E_SUCCESS;
}

void imx_pwr_domain_on_finish(const psci_power_state_t *target_state)
{
	uint64_t mpidr = read_mpidr_el1();
	unsigned int core_id = MPIDR_AFFLVL1_VAL(mpidr);

	plat_gic_pcpu_init();
	plat_gic_cpuif_enable();

	/* the config below applies to both boot & hotplug */
	/* clear the CPU power mode */
	gpc_set_cpu_mode(CPU_A55C0 + core_id, CM_MODE_RUN);
	/* clear the SW wakeup */
	gpc_deassert_sw_wakeup(CPU_A55C0 + core_id);
	/* switch to GIC wakeup source */
	gpc_select_wakeup_gic(CPU_A55C0 + core_id);

	if (boot_stage) {
		/* SRC config */
		/* config the MEM LPM */
		src_mem_lpm_en(SRC_A55P0_MEM + core_id, MEM_OFF);
		/* LPM config: the slice is only ON when its own domain is in RUN mode */
		src_mix_set_lpm(SRC_A55C0 + core_id, core_id, CM_MODE_WAIT);
		/* whitelist config, only enable its own domain */
		src_authen_config(SRC_A55C0 + core_id, 1 << core_id, 0x1);

		boot_stage = false;
	}
}

void imx_pwr_domain_off(const psci_power_state_t *target_state)
{
	uint64_t mpidr = read_mpidr_el1();
	unsigned int core_id = MPIDR_AFFLVL1_VAL(mpidr);
	unsigned int i;

	plat_gic_cpuif_disable();
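	/* permit the DSU to power down the cluster once all the cores are idle */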
	write_clusterpwrdn(DSU_CLUSTER_PWR_OFF);

	/*
	 * Mask all the GPC IRQ wakeup sources so that no IRQ can wake up this
	 * core, as SW_WAKEUP is used for the hotplug purpose.
	 */
	for (i = 0U; i < IMR_NUM; i++) {
		gpc_set_irq_mask(CPU_A55C0 + core_id, i, 0xffffffff);
	}
	/* switch to GPC wakeup source */
	gpc_select_wakeup_raw_irq(CPU_A55C0 + core_id);
	/* config the target mode to suspend */
	gpc_set_cpu_mode(CPU_A55C0 + core_id, CM_MODE_SUSPEND);
}

void imx_pwr_domain_suspend(const psci_power_state_t *target_state)
{
	uint64_t mpidr = read_mpidr_el1();
	unsigned int core_id = MPIDR_AFFLVL1_VAL(mpidr);

	/* do cpu level config */
	if (is_local_state_off(CORE_PWR_STATE(target_state))) {
		plat_gic_cpuif_disable();
		imx_set_cpu_boot_entry(core_id, secure_entrypoint);
		/* config the target mode to WAIT */
		gpc_set_cpu_mode(CPU_A55C0 + core_id, CM_MODE_WAIT);
	}

	/* do cluster level config */
	if (!is_local_state_run(CLUSTER_PWR_STATE(target_state))) {
		/* config the A55 cluster target mode to WAIT */
		gpc_set_cpu_mode(CPU_A55_PLAT, CM_MODE_WAIT);

		/* config DSU for cluster power down with L3 MEM RET */
		if (is_local_state_retn(CLUSTER_PWR_STATE(target_state))) {
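			/* BIT(1) requests L3 memory retention during cluster power down */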
			write_clusterpwrdn(DSU_CLUSTER_PWR_OFF | BIT(1));
		}
	}
}

void imx_pwr_domain_suspend_finish(const psci_power_state_t *target_state)
{
	uint64_t mpidr = read_mpidr_el1();
	unsigned int core_id = MPIDR_AFFLVL1_VAL(mpidr);

	/* cluster level */
	if (!is_local_state_run(CLUSTER_PWR_STATE(target_state))) {
		/* set the cluster's target mode to RUN */
		gpc_set_cpu_mode(CPU_A55_PLAT, CM_MODE_RUN);
	}

	/* do core level */
	if (is_local_state_off(CORE_PWR_STATE(target_state))) {
		/* set A55 CORE's power mode to RUN */
		gpc_set_cpu_mode(CPU_A55C0 + core_id, CM_MODE_RUN);
		plat_gic_cpuif_enable();
	}
}

void imx_get_sys_suspend_power_state(psci_power_state_t *req_state)
{
	unsigned int i;

	for (i = IMX_PWR_LVL0; i <= PLAT_MAX_PWR_LVL; i++) {
		req_state->pwr_domain_state[i] = PLAT_MAX_OFF_STATE;
	}

	SYSTEM_PWR_STATE(req_state) = PLAT_MAX_RET_STATE;
	CLUSTER_PWR_STATE(req_state) = PLAT_MAX_RET_STATE;
}

void __dead2 imx_system_reset(void)
{
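	/* write the unlock key and wait until the watchdog reports unlocked */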
	mmio_write_32(WDOG3_BASE + WDOG_CNT, 0xd928c520);
	while ((mmio_read_32(WDOG3_BASE + WDOG_CS) & WDOG_CS_ULK) == 0U) {
		;
	}

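	/* program a short timeout and enable the watchdog to trigger the reset */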
	mmio_write_32(WDOG3_BASE + WDOG_TOVAL, 0x10);
	mmio_write_32(WDOG3_BASE + WDOG_CS, 0x21e3);

	while (1) {
		wfi();
	}
}

void __dead2 imx_system_off(void)
{
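	/* set the BBNSM DP_EN & TOSP bits to switch off the system power */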
	mmio_setbits_32(BBNSM_BASE + BBNSM_CTRL, BBNSM_DP_EN | BBNSM_TOSP);

	while (1) {
		wfi();
	}
}

static const plat_psci_ops_t imx_plat_psci_ops = {
	.validate_ns_entrypoint = imx_validate_ns_entrypoint,
	.validate_power_state = imx_validate_power_state,
	.pwr_domain_on = imx_pwr_domain_on,
	.pwr_domain_off = imx_pwr_domain_off,
	.pwr_domain_on_finish = imx_pwr_domain_on_finish,
	.pwr_domain_suspend = imx_pwr_domain_suspend,
	.pwr_domain_suspend_finish = imx_pwr_domain_suspend_finish,
	.get_sys_suspend_power_state = imx_get_sys_suspend_power_state,
	.system_reset = imx_system_reset,
	.system_off = imx_system_off,
};

/* export the platform specific psci ops */
int plat_setup_psci_ops(uintptr_t sec_entrypoint,
			const plat_psci_ops_t **psci_ops)
{
	/* sec_entrypoint is used for warm reset */
	secure_entrypoint = sec_entrypoint;
	imx_set_cpu_boot_entry(0, sec_entrypoint);

	pwr_sys_init();

	*psci_ops = &imx_plat_psci_ops;

	return 0;
}