/*
 * Copyright (c) 2016-2023, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <asm_macros.S>
#include <assert_macros.S>
#include <cpu_macros.S>
#include <common/bl_common.h>
#include <lib/cpus/cpu_ops.h>
#include <lib/el3_runtime/cpu_data.h>

#if defined(IMAGE_BL1) || defined(IMAGE_BL32) || \
	(defined(IMAGE_BL2) && RESET_TO_BL2)
	/*
	 * The reset handler common to all platforms. After a matching
	 * cpu_ops structure entry is found, the corresponding reset_handler
	 * in the cpu_ops is invoked. The reset handler is invoked very early
	 * in the boot sequence and it is assumed that we can clobber r0 - r10
	 * without the need to follow AAPCS.
	 * Clobbers: r0 - r10
	 */
	.globl	reset_handler
func reset_handler
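	/*
	 * Save the return address in r8; a stack cannot be assumed to be
	 * available this early in the boot sequence, so a scratch register
	 * is used instead.
	 */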
	mov	r8, lr

	/* The plat_reset_handler can clobber r0 - r7 */
	bl	plat_reset_handler

	/* Get the matching cpu_ops pointer (clobbers: r0 - r5) */
	bl	get_cpu_ops_ptr

#if ENABLE_ASSERTIONS
	cmp	r0, #0
	ASM_ASSERT(ne)
#endif

	/* Get the cpu_ops reset handler */
	ldr	r1, [r0, #CPU_RESET_FUNC]
	cmp	r1, #0
	mov	lr, r8
	bxne	r1
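	/* No CPU-specific reset handler is registered; return to the caller saved above */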
	bx	lr
endfunc reset_handler

#endif

#ifdef IMAGE_BL32 /* The core and cluster power down functions are needed only in BL32 */
	/*
	 * void prepare_cpu_pwr_dwn(unsigned int power_level)
	 *
	 * Prepare CPU power down function for all platforms. The function takes
	 * a power domain level to be powered down as its parameter. After the
	 * cpu_ops pointer is retrieved from cpu_data, the handler for the
	 * requested power level is called.
	 */
	.globl	prepare_cpu_pwr_dwn
func prepare_cpu_pwr_dwn
	/*
	 * If the given power level exceeds the highest implemented level
	 * (CPU_MAX_PWR_DWN_OPS - 1), call the power down handler for the
	 * highest level instead.
	 */
	mov	r2, #(CPU_MAX_PWR_DWN_OPS - 1)
	cmp	r0, r2
	movhi	r0, r2

	push	{r0, lr}
	bl	_cpu_data
	pop	{r2, lr}
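	/* r0 = this core's cpu_data pointer, r2 = clamped power level */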

	ldr	r0, [r0, #CPU_DATA_CPU_OPS_PTR]
#if ENABLE_ASSERTIONS
	cmp	r0, #0
	ASM_ASSERT(ne)
#endif

	/* Get the appropriate power down handler */
	mov	r1, #CPU_PWR_DWN_OPS
	add	r1, r1, r2, lsl #2
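	/* Each handler pointer is 4 bytes wide on AArch32, hence the shift by 2 */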
	ldr	r1, [r0, r1]
#if ENABLE_ASSERTIONS
	cmp	r1, #0
	ASM_ASSERT(ne)
#endif
	bx	r1
endfunc prepare_cpu_pwr_dwn

	/*
	 * Initializes the cpu_ops_ptr if not already initialized
	 * in cpu_data. This must only be called after the data cache
	 * is enabled. AAPCS is followed.
	 */
	.globl	init_cpu_ops
func init_cpu_ops
	push	{r4 - r6, lr}
	bl	_cpu_data
	mov	r6, r0
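	/* Preserve the cpu_data pointer in r6 across the calls below */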
	ldr	r1, [r0, #CPU_DATA_CPU_OPS_PTR]
	cmp	r1, #0
	bne	1f
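	/* cpu_ops pointer not cached yet; look it up by MIDR and store it */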
	bl	get_cpu_ops_ptr
#if ENABLE_ASSERTIONS
	cmp	r0, #0
	ASM_ASSERT(ne)
#endif
	str	r0, [r6, #CPU_DATA_CPU_OPS_PTR]!
1:
	pop	{r4 - r6, pc}
endfunc init_cpu_ops

#endif /* IMAGE_BL32 */

	/*
	 * The below function returns the cpu_ops structure matching the
	 * MIDR of the core. It reads the MIDR and finds the matching
	 * entry in the list of cpu_ops entries. Only the implementer and
	 * part number fields of the MIDR are used for the match.
	 * Return:
	 *     r0 - The matching cpu_ops pointer on success
	 *     r0 - 0 on failure
	 * Clobbers: r0 - r5
	 */
	.globl	get_cpu_ops_ptr
func get_cpu_ops_ptr
	/* Get the cpu_ops start and end locations */
	ldr	r4, =(__CPU_OPS_START__ + CPU_MIDR)
	ldr	r5, =(__CPU_OPS_END__ + CPU_MIDR)
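	/* r4/r5 point at the MIDR field of the first and one-past-last entry */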

	/* Initialize the return parameter */
	mov	r0, #0

	/* Read the MIDR */
	ldcopr	r2, MIDR
	ldr	r3, =CPU_IMPL_PN_MASK

	/* Retain only the implementer and part number using the mask */
	and	r2, r2, r3
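	/* r2 = this core's masked MIDR, used as the search key below */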
1:
	/* Check if we have reached the end of the list */
	cmp	r4, r5
	bhs	error_exit

	/* Load the MIDR from the cpu_ops entry */
	ldr	r1, [r4], #CPU_OPS_SIZE
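	/* The post-increment leaves r4 pointing at the MIDR field of the next entry */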
	and	r1, r1, r3

	/* Check if the entry's MIDR matches this core's MIDR */
	cmp	r1, r2
	bne	1b

	/* Subtract the increment and offset to get the cpu_ops pointer */
	sub	r0, r4, #(CPU_OPS_SIZE + CPU_MIDR)
#if ENABLE_ASSERTIONS
	cmp	r0, #0
	ASM_ASSERT(ne)
#endif
error_exit:
	bx	lr
endfunc get_cpu_ops_ptr

/*
 * Extract the CPU revision and variant, and combine them into a single numeric
 * value for easier comparison.
 */
	.globl	cpu_get_rev_var
func cpu_get_rev_var
	ldcopr	r1, MIDR

	/*
	 * Extract the variant[23:20] and revision[3:0] from r1 and pack them
	 * into r0[7:0] as variant[7:4] and revision[3:0]:
	 *
	 * First extract r1[23:16] to r0[7:0] and zero fill the rest. Then
	 * extract r1[3:0] into r0[3:0], retaining the other bits.
	 */
	ubfx	r0, r1, #(MIDR_VAR_SHIFT - MIDR_REV_BITS), #(MIDR_REV_BITS + MIDR_VAR_BITS)
	bfi	r0, r1, #MIDR_REV_SHIFT, #MIDR_REV_BITS
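	/* r0 = (variant << 4) | revision */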
	bx	lr
endfunc cpu_get_rev_var

/*
 * Compare the CPU's revision-variant (r0) with a given value (r1), for errata
 * application purposes. If the revision-variant is less than or equal to the
 * given value, ERRATA_APPLIES is returned in r0; otherwise ERRATA_NOT_APPLIES.
 */
	.globl	cpu_rev_var_ls
func cpu_rev_var_ls
	cmp	r0, r1
	movls	r0, #ERRATA_APPLIES
	movhi	r0, #ERRATA_NOT_APPLIES
	bx	lr
endfunc cpu_rev_var_ls
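
/*
 * Illustrative (hypothetical) use from a CPU erratum check, assuming an
 * erratum that applies up to and including r1p2. The packed rev-var value
 * produced by cpu_get_rev_var is passed in r0 and the packed limit in r1:
 *
 *	bl	cpu_get_rev_var
 *	mov	r1, #0x12		@ variant 1, revision 2
 *	b	cpu_rev_var_ls
 */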

/*
 * Compare the CPU's revision-variant (r0) with a given value (r1), for errata
 * application purposes. If the revision-variant is greater than or equal to
 * the given value, ERRATA_APPLIES is returned in r0; otherwise
 * ERRATA_NOT_APPLIES.
 */
	.globl	cpu_rev_var_hs
func cpu_rev_var_hs
	cmp	r0, r1
	movge	r0, #ERRATA_APPLIES
	movlt	r0, #ERRATA_NOT_APPLIES
	bx	lr
endfunc cpu_rev_var_hs
