1 /*
2  * Copyright (c) 2013-2022, ARM Limited and Contributors. All rights reserved.
3  *
4  * SPDX-License-Identifier: BSD-3-Clause
5  */
6 
7 #include <assert.h>
8 #include <stddef.h>
9 
10 #include <arch.h>
11 #include <arch_helpers.h>
12 #include <common/bl_common.h>
13 #include <common/debug.h>
14 #include <context.h>
15 #include <lib/el3_runtime/context_mgmt.h>
16 #include <lib/el3_runtime/cpu_data.h>
17 #include <lib/el3_runtime/pubsub_events.h>
18 #include <lib/pmf/pmf.h>
19 #include <lib/runtime_instr.h>
20 #include <plat/common/platform.h>
21 
22 #include "psci_private.h"
23 
24 /*******************************************************************************
25  * This function does generic and platform specific operations after a wake-up
26  * from standby/retention states at multiple power levels.
27  ******************************************************************************/
psci_suspend_to_standby_finisher(unsigned int cpu_idx,unsigned int end_pwrlvl)28 static void psci_suspend_to_standby_finisher(unsigned int cpu_idx,
29 					     unsigned int end_pwrlvl)
30 {
31 	unsigned int parent_nodes[PLAT_MAX_PWR_LVL] = {0};
32 	psci_power_state_t state_info;
33 
34 	/* Get the parent nodes */
35 	psci_get_parent_pwr_domain_nodes(cpu_idx, end_pwrlvl, parent_nodes);
36 
37 	psci_acquire_pwr_domain_locks(end_pwrlvl, parent_nodes);
38 
39 	/*
40 	 * Find out which retention states this CPU has exited from until the
41 	 * 'end_pwrlvl'. The exit retention state could be deeper than the entry
42 	 * state as a result of state coordination amongst other CPUs post wfi.
43 	 */
44 	psci_get_target_local_pwr_states(end_pwrlvl, &state_info);
45 
46 #if ENABLE_PSCI_STAT
47 	plat_psci_stat_accounting_stop(&state_info);
48 	psci_stats_update_pwr_up(end_pwrlvl, &state_info);
49 #endif
50 
51 	/*
52 	 * Plat. management: Allow the platform to do operations
53 	 * on waking up from retention.
54 	 */
55 	psci_plat_pm_ops->pwr_domain_suspend_finish(&state_info);
56 
57 	/*
58 	 * Set the requested and target state of this CPU and all the higher
59 	 * power domain levels for this CPU to run.
60 	 */
61 	psci_set_pwr_domains_to_run(end_pwrlvl);
62 
63 	psci_release_pwr_domain_locks(end_pwrlvl, parent_nodes);
64 }
65 
/*******************************************************************************
 * This function does generic and platform specific suspend to power down
 * operations. Called on the way down only when at least one power level will
 * actually lose context; ordering here is critical: the NS re-entry context
 * must be saved and caches flushed before psci_pwrdown_cpu() is invoked.
 ******************************************************************************/
static void psci_suspend_to_pwrdown_start(unsigned int end_pwrlvl,
					  const entry_point_info_t *ep,
					  const psci_power_state_t *state_info)
{
	/* Deepest power level that will be in the OFF state for this suspend */
	unsigned int max_off_lvl = psci_find_max_off_lvl(state_info);

	PUBLISH_EVENT(psci_suspend_pwrdown_start);

#if PSCI_OS_INIT_MODE
#ifdef PLAT_MAX_CPU_SUSPEND_PWR_LVL
	/*
	 * In OS-initiated mode the caller-supplied end_pwrlvl is overridden
	 * with the platform's maximum CPU_SUSPEND power level (or the
	 * overall maximum when the platform does not define one).
	 */
	end_pwrlvl = PLAT_MAX_CPU_SUSPEND_PWR_LVL;
#else
	end_pwrlvl = PLAT_MAX_PWR_LVL;
#endif
#endif

	/* Save PSCI target power level for the suspend finisher handler */
	psci_set_suspend_pwrlvl(end_pwrlvl);

	/*
	 * Flush the target power level as it might be accessed on power up with
	 * Data cache disabled.
	 */
	psci_flush_cpu_data(psci_svc_cpu_data.target_pwrlvl);

	/*
	 * Call the cpu suspend handler registered by the Secure Payload
	 * Dispatcher to let it do any book-keeping. If the handler encounters an
	 * error, it's expected to assert within
	 */
	if ((psci_spd_pm != NULL) && (psci_spd_pm->svc_suspend != NULL))
		psci_spd_pm->svc_suspend(max_off_lvl);

#if !HW_ASSISTED_COHERENCY
	/*
	 * Plat. management: Allow the platform to perform any early
	 * actions required to power down the CPU. This might be useful for
	 * HW_ASSISTED_COHERENCY = 0 platforms that can safely perform these
	 * actions with data caches enabled.
	 */
	if (psci_plat_pm_ops->pwr_domain_suspend_pwrdown_early != NULL)
		psci_plat_pm_ops->pwr_domain_suspend_pwrdown_early(state_info);
#endif

	/*
	 * Store the re-entry information for the non-secure world.
	 */
	cm_init_my_context(ep);

#if ENABLE_RUNTIME_INSTRUMENTATION

	/*
	 * Flush cache line so that even if CPU power down happens
	 * the timestamp update is reflected in memory.
	 */
	PMF_CAPTURE_TIMESTAMP(rt_instr_svc,
		RT_INSTR_ENTER_CFLUSH,
		PMF_CACHE_MAINT);
#endif

	/*
	 * Arch. management. Initiate power down sequence.
	 * TODO : Introduce a mechanism to query the cache level to flush
	 * and the cpu-ops power down to perform from the platform.
	 */
	psci_pwrdown_cpu(max_off_lvl);

#if ENABLE_RUNTIME_INSTRUMENTATION
	/* Paired with RT_INSTR_ENTER_CFLUSH above: cache maintenance done */
	PMF_CAPTURE_TIMESTAMP(rt_instr_svc,
		RT_INSTR_EXIT_CFLUSH,
		PMF_NO_CACHE_MAINT);
#endif
}
143 
/*******************************************************************************
 * Top level handler which is called when a cpu wants to suspend its execution.
 * It is assumed that along with suspending the cpu power domain, power domains
 * at higher levels until the target power level will be suspended as well. It
 * coordinates with the platform to negotiate the target state for each of
 * the power domain level till the target power domain level. It then performs
 * generic, architectural, platform setup and state management required to
 * suspend that power domain level and power domain levels below it.
 * e.g. For a cpu that's to be suspended, it could mean programming the
 * power controller whereas for a cluster that's to be suspended, it will call
 * the platform specific code which will disable coherency at the interconnect
 * level if the cpu is the last in the cluster and also the program the power
 * controller.
 *
 * All the required parameter checks are performed at the beginning and after
 * the state transition has been done, no further error is expected and it is
 * not possible to undo any of the actions taken beyond that point.
 ******************************************************************************/
int psci_cpu_suspend_start(const entry_point_info_t *ep,
			   unsigned int end_pwrlvl,
			   psci_power_state_t *state_info,
			   unsigned int is_power_down_state)
{
	int rc = PSCI_E_SUCCESS;
	/* Set when the suspend is aborted (pending IRQ / failed validation) */
	bool skip_wfi = false;
	unsigned int idx = plat_my_core_pos();
	unsigned int parent_nodes[PLAT_MAX_PWR_LVL] = {0};

	/*
	 * This function must only be called on platforms where the
	 * CPU_SUSPEND platform hooks have been implemented.
	 */
	assert((psci_plat_pm_ops->pwr_domain_suspend != NULL) &&
	       (psci_plat_pm_ops->pwr_domain_suspend_finish != NULL));

	/* Get the parent nodes */
	psci_get_parent_pwr_domain_nodes(idx, end_pwrlvl, parent_nodes);

	/*
	 * This function acquires the lock corresponding to each power
	 * level so that by the time all locks are taken, the system topology
	 * is snapshot and state management can be done safely.
	 */
	psci_acquire_pwr_domain_locks(end_pwrlvl, parent_nodes);

	/*
	 * We check if there are any pending interrupts after the delay
	 * introduced by lock contention to increase the chances of early
	 * detection that a wake-up interrupt has fired.
	 */
	if (read_isr_el1() != 0U) {
		skip_wfi = true;
		goto exit;
	}

#if PSCI_OS_INIT_MODE
	if (psci_suspend_mode == OS_INIT) {
		/*
		 * This function validates the requested state info for
		 * OS-initiated mode.
		 */
		rc = psci_validate_state_coordination(end_pwrlvl, state_info);
		if (rc != PSCI_E_SUCCESS) {
			skip_wfi = true;
			goto exit;
		}
	} else {
#endif
		/*
		 * This function is passed the requested state info and
		 * it returns the negotiated state info for each power level upto
		 * the end level specified.
		 */
		psci_do_state_coordination(end_pwrlvl, state_info);
#if PSCI_OS_INIT_MODE
	}
#endif

#if PSCI_OS_INIT_MODE
	/* Optional platform veto of the coordinated state (OS-init builds) */
	if (psci_plat_pm_ops->pwr_domain_validate_suspend != NULL) {
		rc = psci_plat_pm_ops->pwr_domain_validate_suspend(state_info);
		if (rc != PSCI_E_SUCCESS) {
			skip_wfi = true;
			goto exit;
		}
	}
#endif

	/* Update the target state in the power domain nodes */
	psci_set_target_local_pwr_states(end_pwrlvl, state_info);

#if ENABLE_PSCI_STAT
	/* Update the last cpu for each level till end_pwrlvl */
	psci_stats_update_pwr_down(end_pwrlvl, state_info);
#endif

	/* Power-down suspends need context save + cache flush before wfi */
	if (is_power_down_state != 0U)
		psci_suspend_to_pwrdown_start(end_pwrlvl, ep, state_info);

	/*
	 * Plat. management: Allow the platform to perform the
	 * necessary actions to turn off this cpu e.g. set the
	 * platform defined mailbox with the psci entrypoint,
	 * program the power controller etc.
	 */

	psci_plat_pm_ops->pwr_domain_suspend(state_info);

#if ENABLE_PSCI_STAT
	plat_psci_stat_accounting_start(state_info);
#endif

exit:
	/*
	 * Release the locks corresponding to each power level in the
	 * reverse order to which they were acquired.
	 */
	psci_release_pwr_domain_locks(end_pwrlvl, parent_nodes);

	/* Aborted suspend: return without entering any low-power state */
	if (skip_wfi) {
		return rc;
	}

	if (is_power_down_state != 0U) {
#if ENABLE_RUNTIME_INSTRUMENTATION

		/*
		 * Update the timestamp with cache off.  We assume this
		 * timestamp can only be read from the current CPU and the
		 * timestamp cache line will be flushed before return to
		 * normal world on wakeup.
		 */
		PMF_CAPTURE_TIMESTAMP(rt_instr_svc,
		    RT_INSTR_ENTER_HW_LOW_PWR,
		    PMF_NO_CACHE_MAINT);
#endif

		/* The function calls below must not return */
		if (psci_plat_pm_ops->pwr_domain_pwr_down_wfi != NULL)
			psci_plat_pm_ops->pwr_domain_pwr_down_wfi(state_info);
		else
			psci_power_down_wfi();
	}

#if ENABLE_RUNTIME_INSTRUMENTATION
	/* Retention path: timestamp taken just before entering wfi */
	PMF_CAPTURE_TIMESTAMP(rt_instr_svc,
	    RT_INSTR_ENTER_HW_LOW_PWR,
	    PMF_NO_CACHE_MAINT);
#endif

	/*
	 * We will reach here if only retention/standby states have been
	 * requested at multiple power levels. This means that the cpu
	 * context will be preserved.
	 */
	wfi();

#if ENABLE_RUNTIME_INSTRUMENTATION
	PMF_CAPTURE_TIMESTAMP(rt_instr_svc,
	    RT_INSTR_EXIT_HW_LOW_PWR,
	    PMF_NO_CACHE_MAINT);
#endif

	/*
	 * After we wake up from context retaining suspend, call the
	 * context retaining suspend finisher.
	 */
	psci_suspend_to_standby_finisher(idx, end_pwrlvl);

	return rc;
}
315 
/*******************************************************************************
 * The following functions finish an earlier suspend request. They
 * are called by the common finisher routine in psci_common.c. The `state_info`
 * is the psci_power_state from which this CPU has woken up from.
 * Runs on the warm-boot path, i.e. after a power-down suspend; ordering is
 * critical: platform finish runs before caches are re-enabled here.
 ******************************************************************************/
void psci_cpu_suspend_finish(unsigned int cpu_idx, const psci_power_state_t *state_info)
{
	unsigned int counter_freq;
	unsigned int max_off_lvl;

	/* Ensure we have been woken up from a suspended state */
	assert((psci_get_aff_info_state() == AFF_STATE_ON) &&
		(is_local_state_off(
			state_info->pwr_domain_state[PSCI_CPU_PWR_LVL]) != 0));

	/*
	 * Plat. management: Perform the platform specific actions
	 * before we change the state of the cpu e.g. enabling the
	 * gic or zeroing the mailbox register. If anything goes
	 * wrong then assert as there is no way to recover from this
	 * situation.
	 */
	psci_plat_pm_ops->pwr_domain_suspend_finish(state_info);

#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
	/* Arch. management: Enable the data cache, stack memory maintenance. */
	psci_do_pwrup_cache_maintenance();
#endif

	/* Re-init the cntfrq_el0 register (lost across the power-down) */
	counter_freq = plat_get_syscnt_freq2();
	write_cntfrq_el0(counter_freq);

#if ENABLE_PAUTH
	/* Store APIAKey_EL1 key */
	set_cpu_data(apiakey[0], read_apiakeylo_el1());
	set_cpu_data(apiakey[1], read_apiakeyhi_el1());
#endif /* ENABLE_PAUTH */

	/*
	 * Call the cpu suspend finish handler registered by the Secure Payload
	 * Dispatcher to let it do any bookeeping. If the handler encounters an
	 * error, it's expected to assert within
	 */
	if ((psci_spd_pm != NULL) && (psci_spd_pm->svc_suspend_finish != NULL)) {
		/* Deepest level that was actually powered off during suspend */
		max_off_lvl = psci_find_max_off_lvl(state_info);
		assert(max_off_lvl != PSCI_INVALID_PWR_LVL);
		psci_spd_pm->svc_suspend_finish(max_off_lvl);
	}

	/* Invalidate the suspend level for the cpu */
	psci_set_suspend_pwrlvl(PSCI_INVALID_PWR_LVL);

	PUBLISH_EVENT(psci_suspend_pwrdown_finish);
}
371