/*
 * Copyright (c) 2022, Xilinx, Inc. All rights reserved.
 * Copyright (c) 2022-2023, Advanced Micro Devices, Inc. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

/*
 * APU specific definition of processors in the subsystem as well as functions
 * for getting information about and changing state of the APU.
 */

#include <assert.h>

#include <drivers/arm/gic_common.h>
#include <drivers/arm/gicv3.h>
#include <lib/bakery_lock.h>
#include <lib/mmio.h>
#include <lib/spinlock.h>
#include <lib/utils.h>
#include <plat/common/platform.h>

#include <plat_ipi.h>
#include <platform_def.h>
#include "pm_api_sys.h"
#include "pm_client.h"
#include <versal_net_def.h>

#define UNDEFINED_CPUID		(~0)

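/*
 * Accessors for the IMPLEMENTATION DEFINED core power control register
 * (S3_0_C15_C2_7): the macro below generates read_cpu_pwrctrl_val() and
 * write_cpu_pwrctrl_val(). On the Cortex cores used on this platform the
 * encoding is understood to correspond to CPUPWRCTLR_EL1, whose
 * CORE_PWRDN_EN bit requests a power down on the next WFI.
 */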
DEFINE_RENAME_SYSREG_RW_FUNCS(cpu_pwrctrl_val, S3_0_C15_C2_7)

/*
 * From Armv8.2 the caches are turned off automatically when a CPU powers
 * down, so it is safe to use a spinlock here; without hardware-assisted
 * coherency a bakery lock is used instead.
 */
#if !HW_ASSISTED_COHERENCY
DEFINE_BAKERY_LOCK(pm_client_secure_lock);
static inline void pm_client_lock_get(void)
{
	bakery_lock_get(&pm_client_secure_lock);
}

static inline void pm_client_lock_release(void)
{
	bakery_lock_release(&pm_client_secure_lock);
}
#else
spinlock_t pm_client_secure_lock;
static inline void pm_client_lock_get(void)
{
	spin_lock(&pm_client_secure_lock);
}

static inline void pm_client_lock_release(void)
{
	spin_unlock(&pm_client_secure_lock);
}
#endif
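/*
 * These wrappers are used below to serialise the suspend/wakeup register
 * programming performed in pm_client_suspend(), pm_client_wakeup() and
 * pm_client_abort_suspend() when several cores enter the PM client at the
 * same time.
 */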

static const struct pm_ipi apu_ipi = {
	.local_ipi_id = IPI_LOCAL_ID,
	.remote_ipi_id = IPI_REMOTE_ID,
	.buffer_base = IPI_BUFFER_LOCAL_BASE,
};

/* Order in pm_procs_all array must match cpu ids */
static const struct pm_proc pm_procs_all[] = {
	{
		.node_id = PM_DEV_CLUSTER0_ACPU_0,
		.ipi = &apu_ipi,
		.pwrdn_mask = 0,
	},
	{
		.node_id = PM_DEV_CLUSTER0_ACPU_1,
		.ipi = &apu_ipi,
		.pwrdn_mask = 0,
	},
	{
		.node_id = PM_DEV_CLUSTER0_ACPU_2,
		.ipi = &apu_ipi,
		.pwrdn_mask = 0,
	},
	{
		.node_id = PM_DEV_CLUSTER0_ACPU_3,
		.ipi = &apu_ipi,
		.pwrdn_mask = 0,
	},
	{
		.node_id = PM_DEV_CLUSTER1_ACPU_0,
		.ipi = &apu_ipi,
		.pwrdn_mask = 0,
	},
	{
		.node_id = PM_DEV_CLUSTER1_ACPU_1,
		.ipi = &apu_ipi,
		.pwrdn_mask = 0,
	},
	{
		.node_id = PM_DEV_CLUSTER1_ACPU_2,
		.ipi = &apu_ipi,
		.pwrdn_mask = 0,
	},
	{
		.node_id = PM_DEV_CLUSTER1_ACPU_3,
		.ipi = &apu_ipi,
		.pwrdn_mask = 0,
	},
	{
		.node_id = PM_DEV_CLUSTER2_ACPU_0,
		.ipi = &apu_ipi,
		.pwrdn_mask = 0,
	},
	{
		.node_id = PM_DEV_CLUSTER2_ACPU_1,
		.ipi = &apu_ipi,
		.pwrdn_mask = 0,
	},
	{
		.node_id = PM_DEV_CLUSTER2_ACPU_2,
		.ipi = &apu_ipi,
		.pwrdn_mask = 0,
	},
	{
		.node_id = PM_DEV_CLUSTER2_ACPU_3,
		.ipi = &apu_ipi,
		.pwrdn_mask = 0,
	},
	{
		.node_id = PM_DEV_CLUSTER3_ACPU_0,
		.ipi = &apu_ipi,
		.pwrdn_mask = 0,
	},
	{
		.node_id = PM_DEV_CLUSTER3_ACPU_1,
		.ipi = &apu_ipi,
		.pwrdn_mask = 0,
	},
	{
		.node_id = PM_DEV_CLUSTER3_ACPU_2,
		.ipi = &apu_ipi,
		.pwrdn_mask = 0,
	},
	{
		.node_id = PM_DEV_CLUSTER3_ACPU_3,
		.ipi = &apu_ipi,
		.pwrdn_mask = 0,
	}
};
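/*
 * The array index doubles as the linear core position (cluster * 4 + core),
 * which is what pm_get_proc() below is expected to receive, typically the
 * value of plat_my_core_pos() (an assumption following from the ordering
 * rule stated above the array).
 */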

const struct pm_proc *primary_proc = &pm_procs_all[0];

/**
 * pm_get_proc() - returns pointer to the proc structure.
 * @cpuid: id of the cpu whose proc struct pointer should be returned.
 *
 * Return: Pointer to a proc structure if proc is found, otherwise NULL.
 *
 */
const struct pm_proc *pm_get_proc(uint32_t cpuid)
{
	if (cpuid < ARRAY_SIZE(pm_procs_all)) {
		return &pm_procs_all[cpuid];
	}

	NOTICE("ERROR: cpuid: %u proc NULL\n", cpuid);
	return NULL;
}
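/*
 * Illustrative, hypothetical caller (not part of this file): the platform
 * PSCI hooks are expected to resolve the current core to its proc entry
 * before issuing PM requests, roughly as follows.
 *
 *	const struct pm_proc *proc = pm_get_proc(plat_my_core_pos());
 *
 *	if (proc == NULL) {
 *		return PSCI_E_INTERN_FAIL;
 *	}
 *	pm_client_suspend(proc, PM_STATE_SUSPEND_TO_RAM);
 */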

/**
 * irq_to_pm_node_idx() - Get PM node index corresponding to the interrupt number.
 * @irq: Interrupt number.
 *
 * Return: PM node index corresponding to the specified interrupt.
 *
 */
enum pm_device_node_idx irq_to_pm_node_idx(uint32_t irq)
{
	enum pm_device_node_idx dev_idx = XPM_NODEIDX_DEV_MIN;

	assert(irq <= IRQ_MAX);

	switch (irq) {
	case 20:
		dev_idx = XPM_NODEIDX_DEV_GPIO;
		break;
	case 21:
		dev_idx = XPM_NODEIDX_DEV_I2C_0;
		break;
	case 22:
		dev_idx = XPM_NODEIDX_DEV_I2C_1;
		break;
	case 23:
		dev_idx = XPM_NODEIDX_DEV_SPI_0;
		break;
	case 24:
		dev_idx = XPM_NODEIDX_DEV_SPI_1;
		break;
	case 25:
		dev_idx = XPM_NODEIDX_DEV_UART_0;
		break;
	case 26:
		dev_idx = XPM_NODEIDX_DEV_UART_1;
		break;
	case 27:
		dev_idx = XPM_NODEIDX_DEV_CAN_FD_0;
		break;
	case 28:
		dev_idx = XPM_NODEIDX_DEV_CAN_FD_1;
		break;
	case 29:
	case 30:
	case 31:
	case 32:
	case 33:
	case 98:
		dev_idx = XPM_NODEIDX_DEV_USB_0;
		break;
	case 34:
	case 35:
	case 36:
	case 37:
	case 38:
	case 99:
		dev_idx = XPM_NODEIDX_DEV_USB_1;
		break;
	case 39:
	case 40:
		dev_idx = XPM_NODEIDX_DEV_GEM_0;
		break;
	case 41:
	case 42:
		dev_idx = XPM_NODEIDX_DEV_GEM_1;
		break;
	case 43:
	case 44:
	case 45:
		dev_idx = XPM_NODEIDX_DEV_TTC_0;
		break;
	case 46:
	case 47:
	case 48:
		dev_idx = XPM_NODEIDX_DEV_TTC_1;
		break;
	case 49:
	case 50:
	case 51:
		dev_idx = XPM_NODEIDX_DEV_TTC_2;
		break;
	case 52:
	case 53:
	case 54:
		dev_idx = XPM_NODEIDX_DEV_TTC_3;
		break;
	case 72:
		dev_idx = XPM_NODEIDX_DEV_ADMA_0;
		break;
	case 73:
		dev_idx = XPM_NODEIDX_DEV_ADMA_1;
		break;
	case 74:
		dev_idx = XPM_NODEIDX_DEV_ADMA_2;
		break;
	case 75:
		dev_idx = XPM_NODEIDX_DEV_ADMA_3;
		break;
	case 76:
		dev_idx = XPM_NODEIDX_DEV_ADMA_4;
		break;
	case 77:
		dev_idx = XPM_NODEIDX_DEV_ADMA_5;
		break;
	case 78:
		dev_idx = XPM_NODEIDX_DEV_ADMA_6;
		break;
	case 79:
		dev_idx = XPM_NODEIDX_DEV_ADMA_7;
		break;
	case 184:
	case 185:
		dev_idx = XPM_NODEIDX_DEV_SDIO_0;
		break;
	case 186:
	case 187:
		dev_idx = XPM_NODEIDX_DEV_SDIO_1;
		break;
	case 200:
		dev_idx = XPM_NODEIDX_DEV_RTC;
		break;
	default:
		dev_idx = XPM_NODEIDX_DEV_MIN;
		break;
	}

	return dev_idx;
}
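/*
 * Illustrative use, for reference only: pm_client_set_wakeup_sources()
 * (called from pm_client_suspend() below) is assumed to scan the enabled
 * GIC interrupts and translate each one through this helper, e.g.
 *
 *	enum pm_device_node_idx idx = irq_to_pm_node_idx(42U);
 *
 * yields XPM_NODEIDX_DEV_GEM_1, which can then be reported to the firmware
 * as a wake-up source; unmapped interrupts return XPM_NODEIDX_DEV_MIN and
 * are skipped.
 */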

/**
 * pm_client_suspend() - Client-specific suspend actions. This function
 *                       should contain any PU-specific actions required
 *                       prior to sending the suspend request to the PMU.
 *                       Actions taken depend on the state the system is
 *                       suspending to.
 * @proc: Processor which needs to suspend.
 * @state: Desired suspend state.
 *
 */
void pm_client_suspend(const struct pm_proc *proc, uint32_t state)
{
	uint32_t cpu_id = plat_my_core_pos();
	uintptr_t val;

	pm_client_lock_get();

	if (state == PM_STATE_SUSPEND_TO_RAM) {
		pm_client_set_wakeup_sources((uint32_t)proc->node_id);
	}

	val = read_cpu_pwrctrl_val();
	val |= CORE_PWRDN_EN_BIT_MASK;
	write_cpu_pwrctrl_val(val);

	isb();

	/* Enable power down interrupt */
	mmio_write_32(APU_PCIL_CORE_X_IEN_POWER_REG(cpu_id),
		      APU_PCIL_CORE_X_IEN_POWER_MASK);
	/* Enable wake interrupt */
	mmio_write_32(APU_PCIL_CORE_X_IEN_WAKE_REG(cpu_id),
		      APU_PCIL_CORE_X_IEN_WAKE_MASK);

	pm_client_lock_release();
}
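/*
 * Descriptive note on the sequence above: setting CORE_PWRDN_EN is
 * understood to arm the core to power down on its next WFI, while the
 * APU_PCIL power/wake interrupt enables let the power controller observe
 * that event and wake the core again later; the WFI itself is issued by
 * the generic PSCI suspend path after this hook returns.
 */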

/**
 * pm_get_cpuid() - get the local cpu ID for a global node ID.
 * @nid: node id of the processor.
 *
 * Return: the cpu ID (starting from 0) for the subsystem.
 *
 */
static uint32_t pm_get_cpuid(uint32_t nid)
{
	for (size_t i = 0; i < ARRAY_SIZE(pm_procs_all); i++) {
		if (pm_procs_all[i].node_id == nid) {
			return i;
		}
	}
	return UNDEFINED_CPUID;
}

/**
 * pm_client_wakeup() - Client-specific wakeup actions.
 * @proc: Processor which needs to wake up.
 *
 * This function should contain any PU-specific actions
 * required for waking up another APU core.
 *
 */
void pm_client_wakeup(const struct pm_proc *proc)
{
	uint32_t cpuid = pm_get_cpuid(proc->node_id);
	uintptr_t val;

	if (cpuid == UNDEFINED_CPUID) {
		return;
	}

	pm_client_lock_get();

	/* Clear powerdown request */
	val = read_cpu_pwrctrl_val();
	val &= ~CORE_PWRDN_EN_BIT_MASK;
	write_cpu_pwrctrl_val(val);

	isb();

	/* Disable power down interrupt */
	mmio_write_32(APU_PCIL_CORE_X_IDS_POWER_REG(cpuid),
		      APU_PCIL_CORE_X_IDS_POWER_MASK);
	/* Disable wake interrupt */
	mmio_write_32(APU_PCIL_CORE_X_IDS_WAKE_REG(cpuid),
		      APU_PCIL_CORE_X_IDS_WAKE_MASK);

	pm_client_lock_release();
}

/**
 * pm_client_abort_suspend() - Client-specific abort-suspend actions.
 *
 * This function should contain any PU-specific actions
 * required for aborting a prior suspend request.
 *
 */
void pm_client_abort_suspend(void)
{
	uint32_t cpu_id = plat_my_core_pos();
	uintptr_t val;

	/* Enable interrupts at processor level (for current cpu) */
	gicv3_cpuif_enable(plat_my_core_pos());

	pm_client_lock_get();

	/* Clear powerdown request */
	val = read_cpu_pwrctrl_val();
	val &= ~CORE_PWRDN_EN_BIT_MASK;
	write_cpu_pwrctrl_val(val);

	isb();

	/* Disable power down interrupt */
	mmio_write_32(APU_PCIL_CORE_X_IDS_POWER_REG(cpu_id),
		      APU_PCIL_CORE_X_IDS_POWER_MASK);

	pm_client_lock_release();
}
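/*
 * Putting the client hooks together (hypothetical flow, for reference
 * only):
 *
 *	pm_client_suspend(proc, state);	arm power down, enable PCIL IRQs
 *	wfi();				issued by the generic PSCI code
 *
 * followed either by pm_client_abort_suspend() on this core when a pending
 * interrupt cancels the suspend, or by pm_client_wakeup(proc), executed by
 * another core on behalf of this one before requesting its wakeup.
 */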