/*
 * Copyright (c) 2013-2019, ARM Limited and Contributors. All rights reserved.
 * Copyright (c) 2023, NVIDIA Corporation. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <string.h>

#include <arch.h>
#include <arch_helpers.h>
#include <common/debug.h>
#include <lib/pmf/pmf.h>
#include <lib/runtime_instr.h>
#include <plat/common/platform.h>

#include "psci_private.h"

/******************************************************************************
 * Construct the psci_power_state to request power OFF at all power levels.
 ******************************************************************************/
static void psci_set_power_off_state(psci_power_state_t *state_info)
{
	unsigned int lvl;

	for (lvl = PSCI_CPU_PWR_LVL; lvl <= PLAT_MAX_PWR_LVL; lvl++)
		state_info->pwr_domain_state[lvl] = PLAT_MAX_OFF_STATE;
}
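
/*
 * For example, on a platform with PLAT_MAX_PWR_LVL == 2U the state constructed
 * above requests PLAT_MAX_OFF_STATE at levels 0, 1 and 2 (typically CPU,
 * cluster and system). The states actually entered are negotiated later by
 * psci_do_state_coordination() in psci_do_cpu_off() below.
 */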

/******************************************************************************
 * Top level handler which is called when a cpu wants to power itself down.
 * It's assumed that along with turning the cpu power domain off, power
 * domains at higher levels will be turned off as far as possible. It finds
 * the highest level where a domain has to be powered off by traversing the
 * node information and then performs the generic, architectural and platform
 * setup and state management required to turn OFF that power domain and the
 * domains below it, e.g. for a cpu that's to be powered OFF, it could mean
 * programming the power controller, whereas for a cluster that's to be
 * powered off, it will call the platform specific code which will disable
 * coherency at the interconnect level if the cpu is the last in the cluster
 * and also program the power controller.
 ******************************************************************************/
int psci_do_cpu_off(unsigned int end_pwrlvl)
{
	int rc = PSCI_E_SUCCESS;
	unsigned int idx = plat_my_core_pos();
	psci_power_state_t state_info;
	unsigned int parent_nodes[PLAT_MAX_PWR_LVL] = {0};

	/*
	 * This function must only be called on platforms where the
	 * CPU_OFF platform hooks have been implemented.
	 */
	assert(psci_plat_pm_ops->pwr_domain_off != NULL);

	/* Construct the psci_power_state for CPU_OFF */
	psci_set_power_off_state(&state_info);

	/*
	 * Call the platform provided early CPU_OFF handler to allow
	 * platforms to perform any housekeeping activities before
	 * actually powering the CPU off. PSCI_E_DENIED indicates that
	 * the CPU off sequence should be aborted at this time.
	 */
	if (psci_plat_pm_ops->pwr_domain_off_early != NULL) {
		rc = psci_plat_pm_ops->pwr_domain_off_early(&state_info);
		if (rc == PSCI_E_DENIED) {
			return rc;
		}
	}
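
	/*
	 * Illustrative sketch only (not part of this file): a platform could
	 * use the early hook to veto the power down, e.g.
	 *
	 *   static int plat_pwr_domain_off_early(const psci_power_state_t *state)
	 *   {
	 *           // Hypothetical helper deciding whether this core may be
	 *           // powered off right now.
	 *           if (!plat_core_may_power_off(plat_my_core_pos()))
	 *                   return PSCI_E_DENIED;
	 *
	 *           return PSCI_E_SUCCESS;
	 *   }
	 *
	 * plat_core_may_power_off() is a hypothetical name; real hooks are
	 * entirely platform specific.
	 */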

	/*
	 * Get the parent nodes here. This must be done before we initiate the
	 * power down sequence: after that point the core may have exited
	 * coherency and its cache may be disabled, so any access to shared
	 * memory (such as the parent node lookup in psci_cpu_pd_nodes) can
	 * cause coherency issues on some platforms.
	 */
	psci_get_parent_pwr_domain_nodes(idx, end_pwrlvl, parent_nodes);

	/*
	 * This function acquires the lock corresponding to each power
	 * level so that by the time all locks are taken, the system topology
	 * has been snapshotted and state management can be done safely.
	 */
	psci_acquire_pwr_domain_locks(end_pwrlvl, parent_nodes);

	/*
	 * Call the cpu off handler registered by the Secure Payload
	 * Dispatcher to let it do any bookkeeping. Assume that the SPD
	 * always reports an E_DENIED error if the SP refuses to power down.
	 */
	if ((psci_spd_pm != NULL) && (psci_spd_pm->svc_off != NULL)) {
		rc = psci_spd_pm->svc_off(0);
		if (rc != 0)
			goto exit;
	}

	/*
	 * This function is passed the requested state info and
	 * it returns the negotiated state info for each power level up to
	 * the end level specified.
	 */
	psci_do_state_coordination(end_pwrlvl, &state_info);

	/* Update the target state in the power domain nodes */
	psci_set_target_local_pwr_states(end_pwrlvl, &state_info);

#if ENABLE_PSCI_STAT
	/* Update the last cpu for each level till end_pwrlvl */
	psci_stats_update_pwr_down(end_pwrlvl, &state_info);
#endif

#if ENABLE_RUNTIME_INSTRUMENTATION

	/*
	 * Flush cache line so that even if CPU power down happens
	 * the timestamp update is reflected in memory.
	 */
	PMF_CAPTURE_TIMESTAMP(rt_instr_svc,
		RT_INSTR_ENTER_CFLUSH,
		PMF_CACHE_MAINT);
#endif

	/*
	 * Arch. management. Initiate power down sequence.
	 */
	psci_pwrdown_cpu(psci_find_max_off_lvl(&state_info));
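	/*
	 * Note: the call above is expected to run the CPU driver's power down
	 * handler and, on platforms without hardware-assisted coherency, flush
	 * the data caches for the levels being powered down (hence the cache
	 * maintenance timestamps around it).
	 */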

#if ENABLE_RUNTIME_INSTRUMENTATION
	PMF_CAPTURE_TIMESTAMP(rt_instr_svc,
		RT_INSTR_EXIT_CFLUSH,
		PMF_NO_CACHE_MAINT);
#endif

	/*
	 * Plat. management: Perform platform specific actions to turn this
	 * cpu off e.g. exit cpu coherency, program the power controller etc.
	 */
	psci_plat_pm_ops->pwr_domain_off(&state_info);
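	/*
	 * Illustrative sketch only: on a platform whose level 1 power domain
	 * is the cluster, the hook above might look like
	 *
	 *   static void plat_pwr_domain_off(const psci_power_state_t *target_state)
	 *   {
	 *           // If the cluster is also being turned off, the last CPU
	 *           // exits interconnect coherency before the power
	 *           // controller is programmed.
	 *           if (target_state->pwr_domain_state[1U] == PLAT_MAX_OFF_STATE)
	 *                   plat_cluster_exit_coherency();
	 *
	 *           plat_power_controller_cpu_off(plat_my_core_pos());
	 *   }
	 *
	 * plat_cluster_exit_coherency() and plat_power_controller_cpu_off() are
	 * hypothetical names; real implementations are platform specific.
	 */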

#if ENABLE_PSCI_STAT
	plat_psci_stat_accounting_start(&state_info);
#endif

exit:
	/*
	 * Release the locks corresponding to each power level in the
	 * reverse order to which they were acquired.
	 */
	psci_release_pwr_domain_locks(end_pwrlvl, parent_nodes);

	/*
	 * Check if all actions needed to safely power down this cpu have
	 * successfully completed.
	 */
	if (rc == PSCI_E_SUCCESS) {
		/*
		 * Set the affinity info state to OFF. When caches are disabled,
		 * this writes directly to main memory, so cache maintenance is
		 * required to ensure that later cached reads of aff_info_state
		 * return AFF_STATE_OFF. A dsbish() ensures ordering of the
		 * update to the affinity info state prior to cache line
		 * invalidation.
		 */
		psci_flush_cpu_data(psci_svc_cpu_data.aff_info_state);
		psci_set_aff_info_state(AFF_STATE_OFF);
		psci_dsbish();
		psci_inv_cpu_data(psci_svc_cpu_data.aff_info_state);

#if ENABLE_RUNTIME_INSTRUMENTATION

		/*
		 * Update the timestamp with cache off. We assume this
		 * timestamp can only be read from the current CPU and the
		 * timestamp cache line will be flushed before return to
		 * normal world on wakeup.
		 */
		PMF_CAPTURE_TIMESTAMP(rt_instr_svc,
			RT_INSTR_ENTER_HW_LOW_PWR,
			PMF_NO_CACHE_MAINT);
#endif

		if (psci_plat_pm_ops->pwr_domain_pwr_down_wfi != NULL) {
			/* This function must not return */
			psci_plat_pm_ops->pwr_domain_pwr_down_wfi(&state_info);
		} else {
			/*
			 * Enter a wfi loop which will allow the power
			 * controller to physically power down this cpu.
			 */
			psci_power_down_wfi();
		}
	}
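
	/*
	 * If the power down sequence completed successfully, execution never
	 * reaches this point: the wfi loop/power-down hook above does not
	 * return. rc is therefore only returned when the power down was
	 * aborted earlier, e.g. because the SPD's off handler refused the
	 * request.
	 */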

	return rc;
}