1 /*
2 * Copyright (c) 2023-2024, Arm Limited and Contributors. All rights reserved.
3 *
4 * SPDX-License-Identifier: BSD-3-Clause
5 */
6
7 #include <assert.h>
8 #include "cpu_errata_info.h"
9 #include <lib/cpus/cpu_ops.h>
10 #include <lib/cpus/errata.h>
11 #include <lib/smccc.h>
12 #include <lib/utils_def.h>
13 #include <services/errata_abi_svc.h>
14 #include <smccc_helpers.h>
15
/*
 * Global pointer that points to the specific
 * structure based on the MIDR part number.
 * NOTE(review): written (never read) by this file outside of
 * non_arm_interconnect_errata(); kept non-static in case other
 * translation units reference it — confirm before narrowing linkage.
 */
struct em_cpu_list *cpu_ptr;

/*
 * Structure array that holds CPU specific errata information.
 *
 * Each em_cpu entry is {erratum ID, rxpx_lo, rxpx_hi}: the erratum
 * applies when the CPU's combined revision/variant value falls inside
 * the inclusive [rxpx_lo, rxpx_hi] range (checked with RXPX_RANGE).
 * Unused slots are filled with UNDEF_ERRATA up to ERRATA_LIST_END.
 * Each CPU's table is compiled in only when the corresponding
 * *_H_INC guard is enabled by the build.
 */
struct em_cpu_list cpu_list[] = {
#if CORTEX_A78_H_INC
{
	.cpu_partnumber = CORTEX_A78_MIDR,
	.cpu_errata_list = {
		[0] = {2712571, 0x00, 0x12},
		[1 ... ERRATA_LIST_END] = UNDEF_ERRATA,
	}
},
#endif /* CORTEX_A78_H_INC */

#if CORTEX_A78_AE_H_INC
{
	.cpu_partnumber = CORTEX_A78_AE_MIDR,
	.cpu_errata_list = {
		[0] = {2712574, 0x00, 0x02},
		[1 ... ERRATA_LIST_END] = UNDEF_ERRATA,
	}
},
#endif /* CORTEX_A78_AE_H_INC */

#if CORTEX_A78C_H_INC
{
	.cpu_partnumber = CORTEX_A78C_MIDR,
	.cpu_errata_list = {
		[0] = {2712575, 0x01, 0x02},
		[1 ... ERRATA_LIST_END] = UNDEF_ERRATA,
	}
},
#endif /* CORTEX_A78C_H_INC */

#if NEOVERSE_V1_H_INC
{
	.cpu_partnumber = NEOVERSE_V1_MIDR,
	.cpu_errata_list = {
		[0] = {2701953, 0x00, 0x11},
		[1 ... ERRATA_LIST_END] = UNDEF_ERRATA,
	}
},
#endif /* NEOVERSE_V1_H_INC */

#if CORTEX_A710_H_INC
{
	.cpu_partnumber = CORTEX_A710_MIDR,
	.cpu_errata_list = {
		[0] = {2701952, 0x00, 0x21},
		[1 ... ERRATA_LIST_END] = UNDEF_ERRATA,
	}
},
#endif /* CORTEX_A710_H_INC */

#if NEOVERSE_N2_H_INC
{
	.cpu_partnumber = NEOVERSE_N2_MIDR,
	.cpu_errata_list = {
		[0] = {2728475, 0x00, 0x02},
		[1 ... ERRATA_LIST_END] = UNDEF_ERRATA,
	}
},
#endif /* NEOVERSE_N2_H_INC */

#if CORTEX_X2_H_INC
{
	.cpu_partnumber = CORTEX_X2_MIDR,
	.cpu_errata_list = {
		[0] = {2701952, 0x00, 0x21},
		[1 ... ERRATA_LIST_END] = UNDEF_ERRATA,
	}
},
#endif /* CORTEX_X2_H_INC */

#if NEOVERSE_V2_H_INC
{
	.cpu_partnumber = NEOVERSE_V2_MIDR,
	.cpu_errata_list = {
		[0] = {2719103, 0x00, 0x01},
		[1 ... ERRATA_LIST_END] = UNDEF_ERRATA,
	}
},
#endif /* NEOVERSE_V2_H_INC */

#if CORTEX_X3_H_INC
{
	.cpu_partnumber = CORTEX_X3_MIDR,
	.cpu_errata_list = {
		[0] = {2701951, 0x00, 0x11},
		[1 ... ERRATA_LIST_END] = UNDEF_ERRATA,
	}
},
#endif /* CORTEX_X3_H_INC */

#if CORTEX_X4_H_INC
{
	.cpu_partnumber = CORTEX_X4_MIDR,
	.cpu_errata_list = {
		[0] = {2701112, 0x00, 0x00},
		[1 ... ERRATA_LIST_END] = UNDEF_ERRATA,
	}
},
#endif /* CORTEX_X4_H_INC */

};
125
#if ERRATA_NON_ARM_INTERCONNECT

/*
 * Check if an erratum relevant to non-Arm interconnect IP applies to the
 * executing CPU.
 *
 * errata_id: numeric erratum identifier queried by the caller.
 * rev_var:   combined revision/variant of the executing CPU
 *            (as returned by cpu_get_rev_var()).
 *
 * Returns EM_AFFECTED when the CPU's rev/var falls inside the erratum's
 * applicable range, EM_NOT_AFFECTED when the erratum is known for this CPU
 * but the rev/var is outside the range, and EM_UNKNOWN_ERRATUM when either
 * the CPU or the erratum is not present in cpu_list[].
 */
static int32_t non_arm_interconnect_errata(uint32_t errata_id, long rev_var)
{
	/* Read the MIDR register to identify the executing CPU */
	uint32_t midr_val = read_midr();

	for (uint8_t i = 0U; i < ARRAY_SIZE(cpu_list); i++) {
		cpu_ptr = &cpu_list[i];

		/* Skip table entries that belong to other CPU part numbers */
		if (EXTRACT_PARTNUM(midr_val) !=
		    EXTRACT_PARTNUM(cpu_ptr->cpu_partnumber)) {
			continue;
		}

		/* Part number matched: search this CPU's erratum table */
		for (int j = 0; j < MAX_PLAT_CPU_ERRATA_ENTRIES; j++) {
			/*
			 * Note: &cpu_errata_list[j] can never be NULL, so the
			 * previous assert(ptr != NULL) was redundant and has
			 * been dropped.
			 */
			struct em_cpu *ptr = &cpu_ptr->cpu_errata_list[j];

			if (errata_id == ptr->em_errata_id) {
				return RXPX_RANGE(rev_var, ptr->em_rxpx_lo,
						  ptr->em_rxpx_hi) ?
					EM_AFFECTED : EM_NOT_AFFECTED;
			}
		}

		/* Only one list entry can match the MIDR: stop searching */
		break;
	}

	return EM_UNKNOWN_ERRATUM;
}
#endif
167
168 /* Function to check if the errata exists for the specific CPU and rxpx */
verify_errata_implemented(uint32_t errata_id,uint32_t forward_flag)169 int32_t verify_errata_implemented(uint32_t errata_id, uint32_t forward_flag)
170 {
171 int32_t ret_val;
172 struct cpu_ops *cpu_ops;
173 struct erratum_entry *entry, *end;
174 long rev_var;
175
176 ret_val = EM_UNKNOWN_ERRATUM;
177 rev_var = cpu_get_rev_var();
178
179 #if ERRATA_NON_ARM_INTERCONNECT
180 ret_val = non_arm_interconnect_errata(errata_id, rev_var);
181 if (ret_val != EM_UNKNOWN_ERRATUM) {
182 return ret_val;
183 }
184 #endif
185
186 cpu_ops = get_cpu_ops_ptr();
187 assert(cpu_ops != NULL);
188
189 entry = cpu_ops->errata_list_start;
190 assert(entry != NULL);
191
192 end = cpu_ops->errata_list_end;
193 assert(end != NULL);
194
195 end--; /* point to the last erratum entry of the queried cpu */
196
197 while ((entry <= end) && (ret_val == EM_UNKNOWN_ERRATUM)) {
198 if (entry->id == errata_id) {
199 if (entry->check_func(rev_var)) {
200 if (entry->chosen)
201 return EM_HIGHER_EL_MITIGATION;
202 else
203 return EM_AFFECTED;
204 }
205 return EM_NOT_AFFECTED;
206 }
207 entry += 1;
208 }
209 return ret_val;
210 }
211
212 /* Predicate indicating that a function id is part of EM_ABI */
is_errata_fid(uint32_t smc_fid)213 bool is_errata_fid(uint32_t smc_fid)
214 {
215 return ((smc_fid == ARM_EM_VERSION) ||
216 (smc_fid == ARM_EM_FEATURES) ||
217 (smc_fid == ARM_EM_CPU_ERRATUM_FEATURES));
218
219 }
220
validate_spsr_mode(void)221 bool validate_spsr_mode(void)
222 {
223 /* In AArch64, if the caller is EL1, return true */
224
225 #if __aarch64__
226 if (GET_EL(read_spsr_el3()) == MODE_EL1) {
227 return true;
228 }
229 return false;
230 #else
231
232 /* In AArch32, if in system/svc mode, return true */
233 uint8_t read_el_state = GET_M32(read_spsr());
234
235 if ((read_el_state == (MODE32_svc)) || (read_el_state == MODE32_sys)) {
236 return true;
237 }
238 return false;
239 #endif /* __aarch64__ */
240 }
241
/*
 * Top-level dispatcher for the Errata ABI SMC service.
 *
 * smc_fid selects the operation; x1 carries the queried erratum ID (or
 * function ID for ARM_EM_FEATURES) and x2 the forward flag. All paths
 * return to the caller through SMC_RET1.
 */
uintptr_t errata_abi_smc_handler(uint32_t smc_fid, u_register_t x1,
				u_register_t x2, u_register_t x3, u_register_t x4,
				void *cookie, void *handle, u_register_t flags)
{
	switch (smc_fid) {
	case ARM_EM_VERSION:
		SMC_RET1(handle, MAKE_SMCCC_VERSION(EM_VERSION_MAJOR,
						    EM_VERSION_MINOR));
		break; /* unreachable */

	case ARM_EM_FEATURES:
		if (is_errata_fid((uint32_t)x1)) {
			SMC_RET1(handle, EM_SUCCESS);
		}
		SMC_RET1(handle, EM_NOT_SUPPORTED);
		break; /* unreachable */

	case ARM_EM_CPU_ERRATUM_FEATURES:
		/*
		 * A non-zero forward flag combined with a caller at EL1
		 * (AArch64) or svc/sys mode (AArch32) is an invalid
		 * parameter combination.
		 */
		if (validate_spsr_mode() && ((uint32_t)x2 != 0U)) {
			SMC_RET1(handle, EM_INVALID_PARAMETERS);
		}
		SMC_RET1(handle,
			 verify_errata_implemented((uint32_t)x1, (uint32_t)x2));
		break; /* unreachable */

	default:
		WARN("Unimplemented Errata ABI Service Call: 0x%x\n", smc_fid);
		SMC_RET1(handle, EM_UNKNOWN_ERRATUM);
		break; /* unreachable */
	}
}
282