/*
 * Copyright (c) 2013-2020, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <string.h>

#include <arch.h>
#include <arch_helpers.h>
#include <common/bl_common.h>
#include <common/debug.h>
#include <context.h>
#include <drivers/delay_timer.h>
#include <lib/el3_runtime/context_mgmt.h>
#include <lib/utils.h>
#include <plat/common/platform.h>

#include "psci_private.h"

/*
 * SPD power management operations, expected to be supplied by the registered
 * SPD on successful SP initialization
 */
const spd_pm_ops_t *psci_spd_pm;

/*
 * PSCI requested local power state map. This array is used to store the local
 * power states requested by a CPU for power levels from level 1 to
 * PLAT_MAX_PWR_LVL. It does not store the requested local power state for power
 * level 0 (PSCI_CPU_PWR_LVL) as the requested and the target power state for a
 * CPU are the same.
 *
 * During state coordination, the platform is passed an array containing the
 * local states requested for a particular non cpu power domain by each cpu
 * within the domain.
 *
 * TODO: Dense packing of the requested states will cause cache thrashing
 * when multiple power domains write to it. If we allocate the requested
 * states at each power level in cache-line aligned per-domain memory, the
 * cache thrashing can be avoided.
 */
static plat_local_state_t
	psci_req_local_pwr_states[PLAT_MAX_PWR_LVL][PLATFORM_CORE_COUNT];

unsigned int psci_plat_core_count;

/*******************************************************************************
 * Arrays that hold the platform's power domain tree information for state
 * management of power domains.
 * Each node in the array 'psci_non_cpu_pd_nodes' corresponds to a power domain
 * which is an ancestor of a CPU power domain.
 * Each node in the array 'psci_cpu_pd_nodes' corresponds to a cpu power domain
 ******************************************************************************/
non_cpu_pd_node_t psci_non_cpu_pd_nodes[PSCI_NUM_NON_CPU_PWR_DOMAINS]
#if USE_COHERENT_MEM
__section("tzfw_coherent_mem")
#endif
;

/* Lock for PSCI state coordination */
DEFINE_PSCI_LOCK(psci_locks[PSCI_NUM_NON_CPU_PWR_DOMAINS]);

cpu_pd_node_t psci_cpu_pd_nodes[PLATFORM_CORE_COUNT];

/*******************************************************************************
 * Pointer to functions exported by the platform to complete power mgmt. ops
 ******************************************************************************/
const plat_psci_ops_t *psci_plat_pm_ops;

/******************************************************************************
 * Check that the maximum power level supported by the platform makes sense
 *****************************************************************************/
CASSERT((PLAT_MAX_PWR_LVL <= PSCI_MAX_PWR_LVL) &&
	(PLAT_MAX_PWR_LVL >= PSCI_CPU_PWR_LVL),
	assert_platform_max_pwrlvl_check);

/*
 * The plat_local_state used by the platform is one of these types: RUN,
 * RETENTION and OFF. The platform can define further sub-states for each type
 * apart from RUN. This categorization is done to verify the sanity of the
 * psci_power_state passed by the platform and to print debug information. The
 * categorization is done on the basis of the following conditions:
 *
 * 1. If (plat_local_state == 0) then the category is STATE_TYPE_RUN.
 *
 * 2. If (0 < plat_local_state <= PLAT_MAX_RET_STATE), then the category is
 *    STATE_TYPE_RETN.
 *
 * 3. If (plat_local_state > PLAT_MAX_RET_STATE), then the category is
 *    STATE_TYPE_OFF.
 */
typedef enum plat_local_state_type {
	STATE_TYPE_RUN = 0,
	STATE_TYPE_RETN,
	STATE_TYPE_OFF
} plat_local_state_type_t;

/* Function used to categorize plat_local_state. */
static plat_local_state_type_t find_local_state_type(plat_local_state_t state)
{
	if (state != 0U) {
		if (state > PLAT_MAX_RET_STATE) {
			return STATE_TYPE_OFF;
		} else {
			return STATE_TYPE_RETN;
		}
	} else {
		return STATE_TYPE_RUN;
	}
}

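/*
 * Illustrative mapping (a sketch assuming hypothetical platform values
 * PLAT_MAX_RET_STATE == 1U and PLAT_MAX_OFF_STATE == 2U):
 *
 *	find_local_state_type(0U) == STATE_TYPE_RUN
 *	find_local_state_type(1U) == STATE_TYPE_RETN
 *	find_local_state_type(2U) == STATE_TYPE_OFF
 */
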
/******************************************************************************
 * Check that the maximum retention level supported by the platform is less
 * than the maximum off level.
 *****************************************************************************/
CASSERT(PLAT_MAX_RET_STATE < PLAT_MAX_OFF_STATE,
	assert_platform_max_off_and_retn_state_check);

/******************************************************************************
 * This function ensures that the power state parameter in a CPU_SUSPEND request
 * is valid. If so, it returns the requested states for each power level.
 *****************************************************************************/
int psci_validate_power_state(unsigned int power_state,
			      psci_power_state_t *state_info)
{
	/* Check SBZ bits in power state are zero */
	if (psci_check_power_state(power_state) != 0U)
		return PSCI_E_INVALID_PARAMS;

	assert(psci_plat_pm_ops->validate_power_state != NULL);

	/* Validate the power_state using platform pm_ops */
	return psci_plat_pm_ops->validate_power_state(power_state, state_info);
}

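/*
 * For illustration, a minimal platform 'validate_power_state' hook might look
 * like the sketch below. The state-id encoding is hypothetical (a state-id of
 * zero requesting CPU-level retention); each platform defines its own
 * encoding.
 *
 *	static int plat_validate_power_state(unsigned int power_state,
 *					     psci_power_state_t *req_state)
 *	{
 *		if (psci_get_pstate_id(power_state) != 0U)
 *			return PSCI_E_INVALID_PARAMS;
 *
 *		req_state->pwr_domain_state[PSCI_CPU_PWR_LVL] =
 *							PLAT_MAX_RET_STATE;
 *		return PSCI_E_SUCCESS;
 *	}
 */
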
/******************************************************************************
 * This function retrieves the `psci_power_state_t` for system suspend from
 * the platform.
 *****************************************************************************/
void psci_query_sys_suspend_pwrstate(psci_power_state_t *state_info)
{
	/*
	 * Assert that the required pm_ops hook is implemented to ensure that
	 * the capability detected during psci_setup() is valid.
	 */
	assert(psci_plat_pm_ops->get_sys_suspend_power_state != NULL);

	/*
	 * Query the platform for the power_state required for system suspend
	 */
	psci_plat_pm_ops->get_sys_suspend_power_state(state_info);
}

/*******************************************************************************
 * This function verifies that all the other cores in the system have been
 * turned OFF and that the current CPU is the last running CPU in the system.
 * Returns 1 (true) if the current CPU is the last ON CPU or 0 (false)
 * otherwise.
 ******************************************************************************/
unsigned int psci_is_last_on_cpu(void)
{
	unsigned int cpu_idx, my_idx = plat_my_core_pos();

	for (cpu_idx = 0; cpu_idx < psci_plat_core_count; cpu_idx++) {
		if (cpu_idx == my_idx) {
			assert(psci_get_aff_info_state() == AFF_STATE_ON);
			continue;
		}

		if (psci_get_aff_info_state_by_idx(cpu_idx) != AFF_STATE_OFF)
			return 0;
	}

	return 1;
}

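/*
 * Typical use (a hedged sketch, mirroring checks such as the one on the
 * SYSTEM_SUSPEND path): refuse an operation unless the calling CPU is the
 * only one still ON.
 *
 *	if (psci_is_last_on_cpu() == 0U)
 *		return PSCI_E_DENIED;
 */
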
/*******************************************************************************
 * Routine to return the maximum power level to traverse to after a cpu has
 * been physically powered up. It is expected to be called immediately after
 * reset from assembler code.
 ******************************************************************************/
static unsigned int get_power_on_target_pwrlvl(void)
{
	unsigned int pwrlvl;

	/*
	 * Assume that this cpu was suspended and retrieve its target power
	 * level. If it is invalid then it could only have been turned off
	 * earlier. PLAT_MAX_PWR_LVL will be the highest power level a
	 * cpu can be turned off to.
	 */
	pwrlvl = psci_get_suspend_pwrlvl();
	if (pwrlvl == PSCI_INVALID_PWR_LVL)
		pwrlvl = PLAT_MAX_PWR_LVL;
	assert(pwrlvl < PSCI_INVALID_PWR_LVL);
	return pwrlvl;
}

/******************************************************************************
 * Helper function to update the requested local power state array. This array
 * does not store the requested state for the CPU power level. Hence an
 * assertion is added to prevent us from accessing the CPU power level.
 *****************************************************************************/
static void psci_set_req_local_pwr_state(unsigned int pwrlvl,
					 unsigned int cpu_idx,
					 plat_local_state_t req_pwr_state)
{
	assert(pwrlvl > PSCI_CPU_PWR_LVL);
	if ((pwrlvl > PSCI_CPU_PWR_LVL) && (pwrlvl <= PLAT_MAX_PWR_LVL) &&
			(cpu_idx < psci_plat_core_count)) {
		psci_req_local_pwr_states[pwrlvl - 1U][cpu_idx] = req_pwr_state;
	}
}

/******************************************************************************
 * This function initializes the psci_req_local_pwr_states.
 *****************************************************************************/
void __init psci_init_req_local_pwr_states(void)
{
	/* Initialize the requested state of all non CPU power domains as OFF */
	unsigned int pwrlvl;
	unsigned int core;

	for (pwrlvl = 0U; pwrlvl < PLAT_MAX_PWR_LVL; pwrlvl++) {
		for (core = 0; core < psci_plat_core_count; core++) {
			psci_req_local_pwr_states[pwrlvl][core] =
				PLAT_MAX_OFF_STATE;
		}
	}
}

/******************************************************************************
 * Helper function to return a reference to an array containing the local power
 * states requested by each cpu for a power domain at 'pwrlvl'. The size of the
 * array will be the number of cpu power domains of which this power domain is
 * an ancestor. These requested states will be used to determine a suitable
 * target state for this power domain during psci state coordination. An
 * assertion is added to prevent us from accessing the CPU power level.
 *****************************************************************************/
static plat_local_state_t *psci_get_req_local_pwr_states(unsigned int pwrlvl,
							 unsigned int cpu_idx)
{
	assert(pwrlvl > PSCI_CPU_PWR_LVL);

	if ((pwrlvl > PSCI_CPU_PWR_LVL) && (pwrlvl <= PLAT_MAX_PWR_LVL) &&
			(cpu_idx < psci_plat_core_count)) {
		return &psci_req_local_pwr_states[pwrlvl - 1U][cpu_idx];
	} else {
		return NULL;
	}
}

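/*
 * Layout example (a sketch assuming a hypothetical PLAT_MAX_PWR_LVL of 2):
 * the state requested by core 3 for its level-1 (e.g. cluster) power domain
 * lives at psci_req_local_pwr_states[0][3], and the one for the level-2
 * (e.g. system) power domain at psci_req_local_pwr_states[1][3]. Level 0 has
 * no row, as a CPU's requested and target states coincide.
 */
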
/*
 * psci_non_cpu_pd_nodes can be placed either in normal memory or coherent
 * memory.
 *
 * With !USE_COHERENT_MEM, psci_non_cpu_pd_nodes is placed in normal memory and
 * is accessed by both cached and non-cached participants. To serve the common
 * minimum, perform a cache flush before read and after write so that
 * non-cached participants operate on the latest data in main memory.
 *
 * When USE_COHERENT_MEM is used, psci_non_cpu_pd_nodes is placed in coherent
 * memory. With HW_ASSISTED_COHERENCY, all PSCI participants are cache-coherent.
 * In both cases, no cache operations are required.
 */

/*
 * Retrieve the local state of a non-CPU power domain node from a non-cached
 * CPU, after any required cache maintenance operation.
 */
static plat_local_state_t get_non_cpu_pd_node_local_state(
		unsigned int parent_idx)
{
#if !(USE_COHERENT_MEM || HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
	flush_dcache_range(
			(uintptr_t) &psci_non_cpu_pd_nodes[parent_idx],
			sizeof(psci_non_cpu_pd_nodes[parent_idx]));
#endif
	return psci_non_cpu_pd_nodes[parent_idx].local_state;
}

/*
 * Update the local state of a non-CPU power domain node from a cached CPU;
 * perform any required cache maintenance operation afterwards.
 */
static void set_non_cpu_pd_node_local_state(unsigned int parent_idx,
					    plat_local_state_t state)
{
	psci_non_cpu_pd_nodes[parent_idx].local_state = state;
#if !(USE_COHERENT_MEM || HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
	flush_dcache_range(
			(uintptr_t) &psci_non_cpu_pd_nodes[parent_idx],
			sizeof(psci_non_cpu_pd_nodes[parent_idx]));
#endif
}

/******************************************************************************
 * Helper function to return the current local power state of each power domain
 * from the current cpu power domain to its ancestor at the 'end_pwrlvl'. This
 * function will be called after a cpu is powered on to find the local state
 * each power domain has emerged from.
 *****************************************************************************/
void psci_get_target_local_pwr_states(unsigned int end_pwrlvl,
				      psci_power_state_t *target_state)
{
	unsigned int parent_idx, lvl;
	plat_local_state_t *pd_state = target_state->pwr_domain_state;

	pd_state[PSCI_CPU_PWR_LVL] = psci_get_cpu_local_state();
	parent_idx = psci_cpu_pd_nodes[plat_my_core_pos()].parent_node;

	/* Copy the local power state from node to state_info */
	for (lvl = PSCI_CPU_PWR_LVL + 1U; lvl <= end_pwrlvl; lvl++) {
		pd_state[lvl] = get_non_cpu_pd_node_local_state(parent_idx);
		parent_idx = psci_non_cpu_pd_nodes[parent_idx].parent_node;
	}

	/* Set the higher levels to RUN */
	for (; lvl <= PLAT_MAX_PWR_LVL; lvl++)
		target_state->pwr_domain_state[lvl] = PSCI_LOCAL_STATE_RUN;
}

/******************************************************************************
 * Helper function to set the target local power state that each power domain
 * from the current cpu power domain to its ancestor at the 'end_pwrlvl' will
 * enter. This function will be called after coordination of requested power
 * states has been done for each power level.
 *****************************************************************************/
static void psci_set_target_local_pwr_states(unsigned int end_pwrlvl,
					     const psci_power_state_t *target_state)
{
	unsigned int parent_idx, lvl;
	const plat_local_state_t *pd_state = target_state->pwr_domain_state;

	psci_set_cpu_local_state(pd_state[PSCI_CPU_PWR_LVL]);

	/*
	 * Need to flush as local_state might be accessed with Data Cache
	 * disabled during power on
	 */
	psci_flush_cpu_data(psci_svc_cpu_data.local_state);

	parent_idx = psci_cpu_pd_nodes[plat_my_core_pos()].parent_node;

	/* Copy the local_state from state_info */
	for (lvl = 1U; lvl <= end_pwrlvl; lvl++) {
		set_non_cpu_pd_node_local_state(parent_idx, pd_state[lvl]);
		parent_idx = psci_non_cpu_pd_nodes[parent_idx].parent_node;
	}
}

/*******************************************************************************
 * PSCI helper function to get the parent nodes corresponding to a cpu_index.
 ******************************************************************************/
void psci_get_parent_pwr_domain_nodes(unsigned int cpu_idx,
				      unsigned int end_lvl,
				      unsigned int *node_index)
{
	unsigned int parent_node = psci_cpu_pd_nodes[cpu_idx].parent_node;
	unsigned int i;
	unsigned int *node = node_index;

	for (i = PSCI_CPU_PWR_LVL + 1U; i <= end_lvl; i++) {
		*node = parent_node;
		node++;
		parent_node = psci_non_cpu_pd_nodes[parent_node].parent_node;
	}
}

/******************************************************************************
 * This function is invoked post CPU power up and initialization. It sets the
 * affinity info state of the current CPU to ON and the target and requested
 * power states of the current CPU and all its ancestor power domains to RUN.
 *****************************************************************************/
void psci_set_pwr_domains_to_run(unsigned int end_pwrlvl)
{
	unsigned int parent_idx, cpu_idx = plat_my_core_pos(), lvl;

	parent_idx = psci_cpu_pd_nodes[cpu_idx].parent_node;

	/* Reset the local_state to RUN for the non cpu power domains. */
	for (lvl = PSCI_CPU_PWR_LVL + 1U; lvl <= end_pwrlvl; lvl++) {
		set_non_cpu_pd_node_local_state(parent_idx,
						PSCI_LOCAL_STATE_RUN);
		psci_set_req_local_pwr_state(lvl, cpu_idx,
					     PSCI_LOCAL_STATE_RUN);
		parent_idx = psci_non_cpu_pd_nodes[parent_idx].parent_node;
	}

	/* Set the affinity info state to ON */
	psci_set_aff_info_state(AFF_STATE_ON);

	psci_set_cpu_local_state(PSCI_LOCAL_STATE_RUN);
	psci_flush_cpu_data(psci_svc_cpu_data);
}

/******************************************************************************
 * This function is passed the local power states requested for each power
 * domain (state_info) between the current CPU domain and its ancestors until
 * the target power level (end_pwrlvl). It updates the array of requested power
 * states with this information.
 *
 * Then, for each level (apart from the CPU level) until the 'end_pwrlvl', it
 * retrieves the states requested by all the cpus of which the power domain at
 * that level is an ancestor. It passes this information to the platform to
 * coordinate and return the target power state. If the target state for a level
 * is RUN then subsequent levels are not considered. At the CPU level, state
 * coordination is not required. Hence, the requested and the target states are
 * the same.
 *
 * The 'state_info' is updated with the target state for each level between the
 * CPU and the 'end_pwrlvl' and returned to the caller.
 *
 * This function will only be invoked with data cache enabled and while
 * powering down a core.
 *****************************************************************************/
void psci_do_state_coordination(unsigned int end_pwrlvl,
				psci_power_state_t *state_info)
{
	unsigned int lvl, parent_idx, cpu_idx = plat_my_core_pos();
	unsigned int start_idx;
	unsigned int ncpus;
	plat_local_state_t target_state, *req_states;

	assert(end_pwrlvl <= PLAT_MAX_PWR_LVL);
	parent_idx = psci_cpu_pd_nodes[cpu_idx].parent_node;

	/*
	 * For level 0, the requested state will be equivalent to the target
	 * state.
	 */
	for (lvl = PSCI_CPU_PWR_LVL + 1U; lvl <= end_pwrlvl; lvl++) {

		/* First update the requested power state */
		psci_set_req_local_pwr_state(lvl, cpu_idx,
					     state_info->pwr_domain_state[lvl]);

		/* Get the requested power states for this power level */
		start_idx = psci_non_cpu_pd_nodes[parent_idx].cpu_start_idx;
		req_states = psci_get_req_local_pwr_states(lvl, start_idx);

		/*
		 * Let the platform coordinate amongst the requested states at
		 * this power level and return the target local power state.
		 */
		ncpus = psci_non_cpu_pd_nodes[parent_idx].ncpus;
		target_state = plat_get_target_pwr_state(lvl,
							 req_states,
							 ncpus);

		state_info->pwr_domain_state[lvl] = target_state;

		/* Break early if the negotiated target power state is RUN */
		if (is_local_state_run(state_info->pwr_domain_state[lvl]) != 0)
			break;

		parent_idx = psci_non_cpu_pd_nodes[parent_idx].parent_node;
	}

	/*
	 * This is for cases when we break out of the above loop early because
	 * the target power state is RUN at a power level < end_pwrlvl.
	 * We update the requested power state from state_info and then
	 * set the target state as RUN.
	 */
	for (lvl = lvl + 1U; lvl <= end_pwrlvl; lvl++) {
		psci_set_req_local_pwr_state(lvl, cpu_idx,
					     state_info->pwr_domain_state[lvl]);
		state_info->pwr_domain_state[lvl] = PSCI_LOCAL_STATE_RUN;
	}

	/* Update the target state in the power domain nodes */
	psci_set_target_local_pwr_states(end_pwrlvl, state_info);
}

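/*
 * For illustration, a coordination policy in the spirit of the generic
 * default picks the shallowest (numerically smallest) of the requested
 * states, so that no CPU is taken to a deeper state than it asked for. This
 * is a hedged sketch, not any particular platform's implementation:
 *
 *	plat_local_state_t plat_get_target_pwr_state(unsigned int lvl,
 *					const plat_local_state_t *states,
 *					unsigned int ncpu)
 *	{
 *		plat_local_state_t target = PLAT_MAX_OFF_STATE;
 *		unsigned int i;
 *
 *		for (i = 0U; i < ncpu; i++) {
 *			if (states[i] < target)
 *				target = states[i];
 *		}
 *
 *		return target;
 *	}
 */
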
/******************************************************************************
 * This function validates a suspend request by making sure that if a standby
 * state is requested then no power level is turned off and the highest power
 * level is placed in a standby/retention state.
 *
 * It also ensures that the state that level X will enter is not shallower than
 * the state that level X + 1 will enter.
 *
 * This validation will be enabled only for DEBUG builds as the platform is
 * expected to perform these validations as well.
 *****************************************************************************/
int psci_validate_suspend_req(const psci_power_state_t *state_info,
			      unsigned int is_power_down_state)
{
	unsigned int max_off_lvl, target_lvl, max_retn_lvl;
	plat_local_state_t state;
	plat_local_state_type_t req_state_type, deepest_state_type;
	int i;

	/* Find the target suspend power level */
	target_lvl = psci_find_target_suspend_lvl(state_info);
	if (target_lvl == PSCI_INVALID_PWR_LVL)
		return PSCI_E_INVALID_PARAMS;

	/* All power domain levels are in a RUN state to begin with */
	deepest_state_type = STATE_TYPE_RUN;

	for (i = (int) target_lvl; i >= (int) PSCI_CPU_PWR_LVL; i--) {
		state = state_info->pwr_domain_state[i];
		req_state_type = find_local_state_type(state);

		/*
		 * While traversing from the highest power level to the lowest,
		 * the state requested for lower levels has to be the same or
		 * deeper i.e. equal to or greater than the state at the higher
		 * levels. If this condition is true, then the requested state
		 * becomes the deepest state encountered so far.
		 */
		if (req_state_type < deepest_state_type)
			return PSCI_E_INVALID_PARAMS;
		deepest_state_type = req_state_type;
	}

	/* Find the highest off power level */
	max_off_lvl = psci_find_max_off_lvl(state_info);

	/* The target_lvl is either equal to the max_off_lvl or max_retn_lvl */
	max_retn_lvl = PSCI_INVALID_PWR_LVL;
	if (target_lvl != max_off_lvl)
		max_retn_lvl = target_lvl;

	/*
	 * If this is not a request for a power down state then max off level
	 * has to be invalid and max retention level has to be a valid power
	 * level.
	 */
	if ((is_power_down_state == 0U) &&
			((max_off_lvl != PSCI_INVALID_PWR_LVL) ||
			 (max_retn_lvl == PSCI_INVALID_PWR_LVL)))
		return PSCI_E_INVALID_PARAMS;

	return PSCI_E_SUCCESS;
}

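/*
 * Example (a sketch assuming PLAT_MAX_PWR_LVL == 1, PLAT_MAX_RET_STATE == 1
 * and PLAT_MAX_OFF_STATE == 2): a power-down request with
 * pwr_domain_state = { OFF, RETN } (CPU off, cluster in retention) passes the
 * monotonicity check, since the lower level is deeper. The reverse,
 * { RETN, OFF }, is rejected because the CPU's state would be shallower than
 * its ancestor's.
 */
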
/******************************************************************************
 * This function finds the highest power level which will be powered down
 * amongst all the power levels specified in the 'state_info' structure.
 *****************************************************************************/
unsigned int psci_find_max_off_lvl(const psci_power_state_t *state_info)
{
	int i;

	for (i = (int) PLAT_MAX_PWR_LVL; i >= (int) PSCI_CPU_PWR_LVL; i--) {
		if (is_local_state_off(state_info->pwr_domain_state[i]) != 0)
			return (unsigned int) i;
	}

	return PSCI_INVALID_PWR_LVL;
}

/******************************************************************************
 * This function finds the level of the highest power domain which will be
 * placed in a low power state during a suspend operation.
 *****************************************************************************/
unsigned int psci_find_target_suspend_lvl(const psci_power_state_t *state_info)
{
	int i;

	for (i = (int) PLAT_MAX_PWR_LVL; i >= (int) PSCI_CPU_PWR_LVL; i--) {
		if (is_local_state_run(state_info->pwr_domain_state[i]) == 0)
			return (unsigned int) i;
	}

	return PSCI_INVALID_PWR_LVL;
}

/*******************************************************************************
 * This function is passed the highest level in the topology tree that the
 * operation should be applied to and a list of node indexes. It picks up locks
 * from the node index list in order of increasing power domain level in the
 * range specified.
 ******************************************************************************/
void psci_acquire_pwr_domain_locks(unsigned int end_pwrlvl,
				   const unsigned int *parent_nodes)
{
	unsigned int parent_idx;
	unsigned int level;

	/* No locking required for level 0. Hence start locking from level 1 */
	for (level = PSCI_CPU_PWR_LVL + 1U; level <= end_pwrlvl; level++) {
		parent_idx = parent_nodes[level - 1U];
		psci_lock_get(&psci_non_cpu_pd_nodes[parent_idx]);
	}
}

/*******************************************************************************
 * This function is passed the highest level in the topology tree that the
 * operation should be applied to and a list of node indexes. It releases the
 * locks in order of decreasing power domain level in the range specified.
 ******************************************************************************/
void psci_release_pwr_domain_locks(unsigned int end_pwrlvl,
				   const unsigned int *parent_nodes)
{
	unsigned int parent_idx;
	unsigned int level;

	/* Unlock top down. No unlocking required for level 0. */
	for (level = end_pwrlvl; level >= (PSCI_CPU_PWR_LVL + 1U); level--) {
		parent_idx = parent_nodes[level - 1U];
		psci_lock_release(&psci_non_cpu_pd_nodes[parent_idx]);
	}
}

/*******************************************************************************
 * Simple routine to determine whether an mpidr is valid or not.
 ******************************************************************************/
int psci_validate_mpidr(u_register_t mpidr)
{
	if (plat_core_pos_by_mpidr(mpidr) < 0)
		return PSCI_E_INVALID_PARAMS;

	return PSCI_E_SUCCESS;
}

/*******************************************************************************
 * This function determines the full entrypoint information for the requested
 * PSCI entrypoint on power on/resume and returns it.
 ******************************************************************************/
#ifdef __aarch64__
static int psci_get_ns_ep_info(entry_point_info_t *ep,
			       uintptr_t entrypoint,
			       u_register_t context_id)
{
	u_register_t ep_attr, sctlr;
	unsigned int daif, ee, mode;
	u_register_t ns_scr_el3 = read_scr_el3();
	u_register_t ns_sctlr_el1 = read_sctlr_el1();

	sctlr = ((ns_scr_el3 & SCR_HCE_BIT) != 0U) ?
		read_sctlr_el2() : ns_sctlr_el1;
	ee = 0;

	ep_attr = NON_SECURE | EP_ST_DISABLE;
	if ((sctlr & SCTLR_EE_BIT) != 0U) {
		ep_attr |= EP_EE_BIG;
		ee = 1;
	}
	SET_PARAM_HEAD(ep, PARAM_EP, VERSION_1, ep_attr);

	ep->pc = entrypoint;
	zeromem(&ep->args, sizeof(ep->args));
	ep->args.arg0 = context_id;

	/*
	 * Figure out whether the cpu enters the non-secure address space
	 * in aarch32 or aarch64
	 */
	if ((ns_scr_el3 & SCR_RW_BIT) != 0U) {
		/*
		 * Check whether a Thumb entry point has been provided for an
		 * aarch64 EL
		 */
		if ((entrypoint & 0x1UL) != 0UL)
			return PSCI_E_INVALID_ADDRESS;

		mode = ((ns_scr_el3 & SCR_HCE_BIT) != 0U) ? MODE_EL2 : MODE_EL1;

		ep->spsr = SPSR_64((uint64_t)mode, MODE_SP_ELX,
				   DISABLE_ALL_EXCEPTIONS);
	} else {
		mode = ((ns_scr_el3 & SCR_HCE_BIT) != 0U) ?
			MODE32_hyp : MODE32_svc;

		/*
		 * TODO: Choose async. exception bits if HYP mode is not
		 * implemented according to the values of SCR.{AW, FW} bits
		 */
		daif = DAIF_ABT_BIT | DAIF_IRQ_BIT | DAIF_FIQ_BIT;

		ep->spsr = SPSR_MODE32((uint64_t)mode, entrypoint & 0x1, ee,
				       daif);
	}

	return PSCI_E_SUCCESS;
}
#else /* !__aarch64__ */
static int psci_get_ns_ep_info(entry_point_info_t *ep,
			       uintptr_t entrypoint,
			       u_register_t context_id)
{
	u_register_t ep_attr;
	unsigned int aif, ee, mode;
	u_register_t scr = read_scr();
	u_register_t ns_sctlr, sctlr;

	/* Switch to non secure state */
	write_scr(scr | SCR_NS_BIT);
	isb();
	ns_sctlr = read_sctlr();

	sctlr = scr & SCR_HCE_BIT ? read_hsctlr() : ns_sctlr;

	/* Return to original state */
	write_scr(scr);
	isb();
	ee = 0;

	ep_attr = NON_SECURE | EP_ST_DISABLE;
	if (sctlr & SCTLR_EE_BIT) {
		ep_attr |= EP_EE_BIG;
		ee = 1;
	}
	SET_PARAM_HEAD(ep, PARAM_EP, VERSION_1, ep_attr);

	ep->pc = entrypoint;
	zeromem(&ep->args, sizeof(ep->args));
	ep->args.arg0 = context_id;

	mode = scr & SCR_HCE_BIT ? MODE32_hyp : MODE32_svc;

	/*
	 * TODO: Choose async. exception bits if HYP mode is not
	 * implemented according to the values of SCR.{AW, FW} bits
	 */
	aif = SPSR_ABT_BIT | SPSR_IRQ_BIT | SPSR_FIQ_BIT;

	ep->spsr = SPSR_MODE32(mode, entrypoint & 0x1, ee, aif);

	return PSCI_E_SUCCESS;
}

#endif /* __aarch64__ */

/*******************************************************************************
 * This function validates the entrypoint with the platform layer if the
 * appropriate pm_ops hook is exported by the platform and returns the
 * 'entry_point_info'.
 ******************************************************************************/
int psci_validate_entry_point(entry_point_info_t *ep,
			      uintptr_t entrypoint,
			      u_register_t context_id)
{
	int rc;

	/* Validate the entrypoint using platform psci_ops */
	if (psci_plat_pm_ops->validate_ns_entrypoint != NULL) {
		rc = psci_plat_pm_ops->validate_ns_entrypoint(entrypoint);
		if (rc != PSCI_E_SUCCESS)
			return PSCI_E_INVALID_ADDRESS;
	}

	/*
	 * Verify and derive the re-entry information for
	 * the non-secure world from the non-secure state from
	 * where this call originated.
	 */
	rc = psci_get_ns_ep_info(ep, entrypoint, context_id);
	return rc;
}

/*******************************************************************************
 * Generic handler which is called when a cpu is physically powered on. It
 * traverses the node information and finds the highest power level powered
 * off and performs generic, architectural, platform setup and state management
 * to power on that power level and power levels below it.
 * e.g. For a cpu that's been powered on, it will call the platform specific
 * code to enable the gic cpu interface and for a cluster it will enable
 * coherency at the interconnect level in addition to gic cpu interface.
 ******************************************************************************/
void psci_warmboot_entrypoint(void)
{
	unsigned int end_pwrlvl;
	unsigned int cpu_idx = plat_my_core_pos();
	unsigned int parent_nodes[PLAT_MAX_PWR_LVL] = {0};
	psci_power_state_t state_info = { {PSCI_LOCAL_STATE_RUN} };

	/*
	 * Verify that we have been explicitly turned ON or resumed from
	 * suspend.
	 */
	if (psci_get_aff_info_state() == AFF_STATE_OFF) {
		ERROR("Unexpected affinity info state.\n");
		panic();
	}

	/*
	 * Get the maximum power domain level to traverse to after this cpu
	 * has been physically powered up.
	 */
	end_pwrlvl = get_power_on_target_pwrlvl();

	/* Get the parent nodes */
	psci_get_parent_pwr_domain_nodes(cpu_idx, end_pwrlvl, parent_nodes);

	/*
	 * This function acquires the lock corresponding to each power level so
	 * that by the time all locks are taken, the system topology is
	 * effectively snapshotted and state management can be done safely.
	 */
	psci_acquire_pwr_domain_locks(end_pwrlvl, parent_nodes);

	psci_get_target_local_pwr_states(end_pwrlvl, &state_info);

#if ENABLE_PSCI_STAT
	plat_psci_stat_accounting_stop(&state_info);
#endif

	/*
	 * This CPU could be resuming from suspend or it could have just been
	 * turned on. To distinguish between these 2 cases, we examine the
	 * affinity state of the CPU:
	 *  - If the affinity state is ON_PENDING then it has just been
	 *    turned on.
	 *  - Else it is resuming from suspend.
	 *
	 * Depending on the type of warm reset identified, choose the right set
	 * of power management handlers and perform the generic, architecture
	 * and platform specific handling.
	 */
	if (psci_get_aff_info_state() == AFF_STATE_ON_PENDING)
		psci_cpu_on_finish(cpu_idx, &state_info);
	else
		psci_cpu_suspend_finish(cpu_idx, &state_info);

	/*
	 * Set the requested and target state of this CPU and all the higher
	 * power domains which are ancestors of this CPU to run.
	 */
	psci_set_pwr_domains_to_run(end_pwrlvl);

#if ENABLE_PSCI_STAT
	/*
	 * Update PSCI stats.
	 * Caches are off when writing stats data on the power down path.
	 * Since caches are now enabled, it's necessary to do cache
	 * maintenance before reading that same data.
	 */
	psci_stats_update_pwr_up(end_pwrlvl, &state_info);
#endif

	/*
	 * This loop releases the lock corresponding to each power level
	 * in the reverse order to which they were acquired.
	 */
	psci_release_pwr_domain_locks(end_pwrlvl, parent_nodes);
}

/*******************************************************************************
 * This function initializes the set of hooks that PSCI invokes as part of
 * power management operations. The power management hooks are expected to be
 * provided by the SPD, after it finishes all its initialization.
 ******************************************************************************/
void psci_register_spd_pm_hook(const spd_pm_ops_t *pm)
{
	assert(pm != NULL);
	psci_spd_pm = pm;

	if (pm->svc_migrate != NULL)
		psci_caps |= define_psci_cap(PSCI_MIG_AARCH64);

	if (pm->svc_migrate_info != NULL)
		psci_caps |= define_psci_cap(PSCI_MIG_INFO_UP_CPU_AARCH64)
			| define_psci_cap(PSCI_MIG_INFO_TYPE);
}

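/*
 * Illustrative registration from an SPD's setup code (a hedged sketch; the
 * 'my_spd_pm' structure and its handler names are hypothetical):
 *
 *	static const spd_pm_ops_t my_spd_pm = {
 *		.svc_on = my_spd_cpu_on_handler,
 *		.svc_off = my_spd_cpu_off_handler,
 *		.svc_suspend = my_spd_cpu_suspend_handler,
 *		.svc_on_finish = my_spd_cpu_on_finish_handler,
 *		.svc_suspend_finish = my_spd_cpu_suspend_finish_handler,
 *	};
 *
 *	psci_register_spd_pm_hook(&my_spd_pm);
 */
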
/*******************************************************************************
 * This function invokes the migrate info hook in the spd_pm_ops. It performs
 * the necessary return value validation. If the Secure Payload is UP and
 * migrate capable, it returns the mpidr of the CPU on which the Secure payload
 * is resident through the mpidr parameter. Else the value of the parameter on
 * return is undefined.
 ******************************************************************************/
int psci_spd_migrate_info(u_register_t *mpidr)
{
	int rc;

	if ((psci_spd_pm == NULL) || (psci_spd_pm->svc_migrate_info == NULL))
		return PSCI_E_NOT_SUPPORTED;

	rc = psci_spd_pm->svc_migrate_info(mpidr);

	assert((rc == PSCI_TOS_UP_MIG_CAP) || (rc == PSCI_TOS_NOT_UP_MIG_CAP) ||
	       (rc == PSCI_TOS_NOT_PRESENT_MP) || (rc == PSCI_E_NOT_SUPPORTED));

	return rc;
}

/*******************************************************************************
 * This function prints the state of all power domains present in the
 * system.
 ******************************************************************************/
void psci_print_power_domain_map(void)
{
#if LOG_LEVEL >= LOG_LEVEL_INFO
	unsigned int idx;
	plat_local_state_t state;
	plat_local_state_type_t state_type;

	/* This array maps to the PSCI_STATE_X definitions in psci.h */
	static const char * const psci_state_type_str[] = {
		"ON",
		"RETENTION",
		"OFF",
	};

	INFO("PSCI Power Domain Map:\n");
	for (idx = 0; idx < (PSCI_NUM_PWR_DOMAINS - psci_plat_core_count);
			idx++) {
		state_type = find_local_state_type(
				psci_non_cpu_pd_nodes[idx].local_state);
		INFO("  Domain Node : Level %u, parent_node %d,"
				" State %s (0x%x)\n",
				psci_non_cpu_pd_nodes[idx].level,
				psci_non_cpu_pd_nodes[idx].parent_node,
				psci_state_type_str[state_type],
				psci_non_cpu_pd_nodes[idx].local_state);
	}

	for (idx = 0; idx < psci_plat_core_count; idx++) {
		state = psci_get_cpu_local_state_by_idx(idx);
		state_type = find_local_state_type(state);
		INFO("  CPU Node : MPID 0x%llx, parent_node %d,"
				" State %s (0x%x)\n",
				(unsigned long long)psci_cpu_pd_nodes[idx].mpidr,
				psci_cpu_pd_nodes[idx].parent_node,
				psci_state_type_str[state_type],
				psci_get_cpu_local_state_by_idx(idx));
	}
#endif
}

/******************************************************************************
 * Return whether any secondaries were powered up with a CPU_ON call. A CPU
 * that has ever been powered up would have set its MPIDR value to something
 * other than PSCI_INVALID_MPIDR. Note that the MPIDR isn't reset back to
 * PSCI_INVALID_MPIDR when a CPU is powered down later, so the return value is
 * meaningful only when called on the primary CPU during early boot.
 *****************************************************************************/
int psci_secondaries_brought_up(void)
{
	unsigned int idx, n_valid = 0U;

	for (idx = 0U; idx < ARRAY_SIZE(psci_cpu_pd_nodes); idx++) {
		if (psci_cpu_pd_nodes[idx].mpidr != PSCI_INVALID_MPIDR)
			n_valid++;
	}

	assert(n_valid > 0U);

	return (n_valid > 1U) ? 1 : 0;
}

/*******************************************************************************
 * Initiate the power down sequence by calling the power down operations
 * registered for this CPU.
 ******************************************************************************/
void psci_do_pwrdown_sequence(unsigned int power_level)
{
#if HW_ASSISTED_COHERENCY
	/*
	 * With hardware-assisted coherency, the CPU drivers only initiate the
	 * power down sequence, without performing cache-maintenance operations
	 * in software. Data caches remain enabled both before and after this
	 * call.
	 */
	prepare_cpu_pwr_dwn(power_level);
#else
	/*
	 * Without hardware-assisted coherency, the CPU drivers disable data
	 * caches, then perform cache-maintenance operations in software.
	 *
	 * This also calls prepare_cpu_pwr_dwn() to initiate the power down
	 * sequence, but that function will return with data caches disabled.
	 * We must ensure that the stack memory is flushed out to memory before
	 * we start popping from it again.
	 */
	psci_do_pwrdown_cache_maintenance(power_level);
#endif
}

/*******************************************************************************
 * This function invokes the callback 'stop_func()' with the 'mpidr' of each
 * online PE. The caller can pass a suitable method to stop a remote core.
 *
 * 'wait_ms' is the timeout value in milliseconds for the other cores to
 * transition to the power down state. Passing '0' makes it non-blocking.
 *
 * The function returns 'PSCI_E_DENIED' if some cores failed to stop within the
 * given timeout.
 ******************************************************************************/
int psci_stop_other_cores(unsigned int wait_ms,
			  void (*stop_func)(u_register_t mpidr))
{
	unsigned int idx, this_cpu_idx;

	this_cpu_idx = plat_my_core_pos();

	/* Invoke stop_func for each core */
	for (idx = 0U; idx < psci_plat_core_count; idx++) {
		/* Skip the current CPU */
		if (idx == this_cpu_idx) {
			continue;
		}

		/* Check if the CPU is ON */
		if (psci_get_aff_info_state_by_idx(idx) == AFF_STATE_ON) {
			(*stop_func)(psci_cpu_pd_nodes[idx].mpidr);
		}
	}

	/* Need to wait for the other cores to shut down */
	if (wait_ms != 0U) {
		/*
		 * psci_is_last_on_cpu() returns 1 once every other core is
		 * OFF, so keep polling while it still reports 0.
		 */
		while ((wait_ms-- != 0U) && (psci_is_last_on_cpu() == 0U)) {
			mdelay(1U);
		}

		if (psci_is_last_on_cpu() == 0U) {
			WARN("Failed to stop all cores!\n");
			psci_print_power_domain_map();
			return PSCI_E_DENIED;
		}
	}

	return PSCI_E_SUCCESS;
}
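
/*
 * Illustrative call site (a hedged sketch; 'plat_raise_pwr_down_sgi' is a
 * hypothetical platform helper that interrupts a remote core and asks it to
 * power itself down):
 *
 *	if (psci_stop_other_cores(10U, plat_raise_pwr_down_sgi) !=
 *			PSCI_E_SUCCESS) {
 *		ERROR("Failed to stop the other cores\n");
 *		panic();
 *	}
 */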