1 /*
2 * Copyright (c) 2015-2024, Arm Limited and Contributors. All rights reserved.
3 *
4 * SPDX-License-Identifier: BSD-3-Clause
5 */
6
7 #include <assert.h>
8
9 #include <platform_def.h>
10
11 #include <arch.h>
12 #include <bl1/bl1.h>
13 #include <common/bl_common.h>
14 #include <common/debug.h>
15 #include <lib/fconf/fconf.h>
16 #include <lib/fconf/fconf_dyn_cfg_getter.h>
17 #if TRANSFER_LIST
18 #include <lib/transfer_list.h>
19 #endif
20 #include <lib/utils.h>
21 #include <lib/xlat_tables/xlat_tables_compat.h>
22 #include <plat/arm/common/plat_arm.h>
23 #include <plat/common/platform.h>
24
25 /* Weak definitions may be overridden in specific ARM standard platform */
26 #pragma weak bl1_early_platform_setup
27 #pragma weak bl1_plat_arch_setup
28 #pragma weak bl1_plat_sec_mem_layout
29 #pragma weak arm_bl1_early_platform_setup
30 #pragma weak bl1_plat_prepare_exit
31 #pragma weak bl1_plat_get_next_image_id
32 #pragma weak plat_arm_bl1_fwu_needed
33 #pragma weak arm_bl1_plat_arch_setup
34 #pragma weak arm_bl1_platform_setup
35
/* Map the whole of BL1's trusted SRAM as RW EL3 memory. */
#define MAP_BL1_TOTAL		MAP_REGION_FLAT(			\
					bl1_tzram_layout.total_base,	\
					bl1_tzram_layout.total_size,	\
					MT_MEMORY | MT_RW | EL3_PAS)
/*
 * If SEPARATE_CODE_AND_RODATA=1 we define a region for each section
 * otherwise one region is defined containing both.
 */
#if SEPARATE_CODE_AND_RODATA
#define MAP_BL1_RO		MAP_REGION_FLAT(			\
					BL_CODE_BASE,			\
					BL1_CODE_END - BL_CODE_BASE,	\
					MT_CODE | EL3_PAS),		\
				MAP_REGION_FLAT(			\
					BL1_RO_DATA_BASE,		\
					BL1_RO_DATA_END			\
						- BL_RO_DATA_BASE,	\
					MT_RO_DATA | EL3_PAS)
#else
/* Code and RO data share a single executable region. */
#define MAP_BL1_RO		MAP_REGION_FLAT(			\
					BL_CODE_BASE,			\
					BL1_CODE_END - BL_CODE_BASE,	\
					MT_CODE | EL3_PAS)
#endif
60
/* Data structure which holds the extents of the trusted SRAM for BL1. */
static meminfo_t bl1_tzram_layout;

/*
 * Set during arm_bl1_platform_setup(); when true, platform setup is
 * skipped and bl1_plat_get_next_image_id() selects the FWU image.
 */
static bool is_fwu_needed;

#if TRANSFER_LIST
/* Secure transfer list used to hand off information to BL2. */
static struct transfer_list_header *secure_tl;
#endif
70
bl1_plat_sec_mem_layout(void)71 struct meminfo *bl1_plat_sec_mem_layout(void)
72 {
73 return &bl1_tzram_layout;
74 }
75
76 /*******************************************************************************
77 * BL1 specific platform actions shared between ARM standard platforms.
78 ******************************************************************************/
arm_bl1_early_platform_setup(void)79 void arm_bl1_early_platform_setup(void)
80 {
81
82 #if !ARM_DISABLE_TRUSTED_WDOG
83 /* Enable watchdog */
84 plat_arm_secure_wdt_start();
85 #endif
86
87 /* Initialize the console to provide early debug support */
88 arm_console_boot_init();
89
90 /* Allow BL1 to see the whole Trusted RAM */
91 bl1_tzram_layout.total_base = ARM_BL_RAM_BASE;
92 bl1_tzram_layout.total_size = ARM_BL_RAM_SIZE;
93 }
94
/* Default early platform setup: shared Arm init plus interconnect bring-up. */
void bl1_early_platform_setup(void)
{
	arm_bl1_early_platform_setup();

	/*
	 * Cold boot: initialize the interconnect and then make the
	 * primary CPU's cluster coherent. No locks are required as no
	 * other CPU is active at this point.
	 */
	plat_arm_interconnect_init();
	plat_arm_interconnect_enter_coherency();
}
109
110 /******************************************************************************
111 * Perform the very early platform specific architecture setup shared between
112 * ARM standard platforms. This only does basic initialization. Later
113 * architectural setup (bl1_arch_setup()) does not do anything platform
114 * specific.
115 *****************************************************************************/
void arm_bl1_plat_arch_setup(void)
{
#if USE_COHERENT_MEM
	/* Ensure ARM platforms don't use coherent memory in BL1. */
	assert((BL_COHERENT_RAM_END - BL_COHERENT_RAM_BASE) == 0U);
#endif

	/*
	 * Regions to map: all of BL1's SRAM (RW), its code/RO data, and
	 * the ROM library regions when USE_ROMLIB=1. The {0} entry
	 * terminates the array.
	 */
	const mmap_region_t bl_regions[] = {
		MAP_BL1_TOTAL,
		MAP_BL1_RO,
#if USE_ROMLIB
		ARM_MAP_ROMLIB_CODE,
		ARM_MAP_ROMLIB_DATA,
#endif
		{0}
	};

	setup_page_tables(bl_regions, plat_arm_get_mmap());
	/* Enable the MMU at the current exception level (EL3 or S-EL1/mon). */
#ifdef __aarch64__
	enable_mmu_el3(0);
#else
	enable_mmu_svc_mon(0);
#endif /* __aarch64__ */

	/* Set up the ROM library (no-op when USE_ROMLIB=0). */
	arm_setup_romlib();
}
142
/* Weak default: defer architecture setup to the shared Arm implementation. */
void bl1_plat_arch_setup(void)
{
	arm_bl1_plat_arch_setup();
}
147
148 /*
149 * Perform the platform specific architecture setup shared between
150 * ARM standard platforms.
151 */
void arm_bl1_platform_setup(void)
{
	const struct dyn_cfg_dtb_info_t *config_info __unused;
	uint32_t fw_config_max_size __unused;
	image_info_t config_image_info __unused;
	struct transfer_list_entry *te __unused;

	image_desc_t *desc;

	int err = -1;

	/* Initialise the IO layer and register platform IO devices */
	plat_arm_io_setup();

	/*
	 * Check if we need FWU before further processing; when FWU is
	 * required the rest of the setup (config loading, BL2 handoff
	 * preparation) is deliberately skipped.
	 */
	is_fwu_needed = plat_arm_bl1_fwu_needed();
	if (is_fwu_needed) {
		ERROR("Skip platform setup as FWU detected\n");
		return;
	}

#if TRANSFER_LIST
	/* Firmware Handoff: pass configs to BL2 via a secure transfer list. */
	secure_tl = transfer_list_init((void *)PLAT_ARM_EL3_FW_HANDOFF_BASE,
				       PLAT_ARM_FW_HANDOFF_SIZE);

	if (secure_tl == NULL) {
		ERROR("Secure transfer list initialisation failed!\n");
		panic();
	}

	te = transfer_list_add(secure_tl, TL_TAG_TB_FW_CONFIG,
			       ARM_TB_FW_CONFIG_MAX_SIZE, NULL);
	assert(te != NULL);

	/*
	 * Set the load address of TB_FW_CONFIG in the data section of the TE just
	 * allocated in the secure transfer list.
	 */
	SET_PARAM_HEAD(&config_image_info, PARAM_IMAGE_BINARY, VERSION_2, 0);
	config_image_info.image_base = (uintptr_t)transfer_list_entry_data(te);
	config_image_info.image_max_size = te->data_size;

	VERBOSE("FCONF: Loading config with image ID: %u\n", TB_FW_CONFIG_ID);
	err = load_auth_image(TB_FW_CONFIG_ID, &config_image_info);
	if (err != 0) {
		VERBOSE("Failed to load config %u\n", TB_FW_CONFIG_ID);
		plat_error_handler(err);
	}

	/* The TE payload changed, so the TL checksum must be refreshed. */
	transfer_list_update_checksum(secure_tl);
	fconf_populate("TB_FW", (uintptr_t)transfer_list_entry_data(te));
#else
	/* Set global DTB info for fixed fw_config information */
	fw_config_max_size = ARM_FW_CONFIG_LIMIT - ARM_FW_CONFIG_BASE;
	set_config_info(ARM_FW_CONFIG_BASE, ~0UL, fw_config_max_size, FW_CONFIG_ID);

	/* Fill the device tree information struct with the info from the config dtb */
	err = fconf_load_config(FW_CONFIG_ID);
	if (err < 0) {
		ERROR("Loading of FW_CONFIG failed %d\n", err);
		plat_error_handler(err);
	}

	/*
	 * FW_CONFIG loaded successfully. If FW_CONFIG device tree parsing
	 * is successful then load TB_FW_CONFIG device tree.
	 */
	config_info = FCONF_GET_PROPERTY(dyn_cfg, dtb, FW_CONFIG_ID);
	if (config_info != NULL) {
		err = fconf_populate_dtb_registry(config_info->config_addr);
		if (err < 0) {
			ERROR("Parsing of FW_CONFIG failed %d\n", err);
			plat_error_handler(err);
		}

		/* load TB_FW_CONFIG */
		err = fconf_load_config(TB_FW_CONFIG_ID);
		if (err < 0) {
			ERROR("Loading of TB_FW_CONFIG failed %d\n", err);
			plat_error_handler(err);
		}
	} else {
		ERROR("Invalid FW_CONFIG address\n");
		plat_error_handler(err);
	}
#endif /* TRANSFER_LIST */

	/* Prepare the entry-point arguments BL2 will receive. */
	desc = bl1_plat_get_image_desc(BL2_IMAGE_ID);

#if TRANSFER_LIST
	transfer_list_set_handoff_args(secure_tl, &desc->ep_info);
#else
	/* The BL2 ep_info arg0 is modified to point to FW_CONFIG */
	assert(desc != NULL);
	desc->ep_info.args.arg0 = config_info->config_addr;
#endif /* TRANSFER_LIST */

#if CRYPTO_SUPPORT
	/* Share the Mbed TLS heap info with other images */
	arm_bl1_set_mbedtls_heap();
#endif /* CRYPTO_SUPPORT */

	/*
	 * Allow access to the System counter timer module and program
	 * counter frequency for non secure images during FWU
	 */
#ifdef ARM_SYS_TIMCTL_BASE
	arm_configure_sys_timer();
#endif
#if (ARM_ARCH_MAJOR > 7) || defined(ARMV7_SUPPORTS_GENERIC_TIMER)
	write_cntfrq_el0(plat_get_syscnt_freq2());
#endif
}
265
/* Last platform hook before BL1 hands control to the next image. */
void bl1_plat_prepare_exit(entry_point_info_t *ep_info)
{
#if !ARM_DISABLE_TRUSTED_WDOG
	/* Disable watchdog before leaving BL1 */
	plat_arm_secure_wdt_stop();
#endif

#ifdef EL3_PAYLOAD_BASE
	/*
	 * Program the EL3 payload's entry point address into the CPUs mailbox
	 * in order to release secondary CPUs from their holding pen and make
	 * them jump there.
	 */
	plat_arm_program_trusted_mailbox(ep_info->pc);
	/* Ensure the mailbox write is visible before waking the CPUs. */
	dsbsy();
	sev();
#endif
}
284
285 /*
286 * On Arm platforms, the FWU process is triggered when the FIP image has
287 * been tampered with.
288 */
plat_arm_bl1_fwu_needed(void)289 bool plat_arm_bl1_fwu_needed(void)
290 {
291 return !arm_io_is_toc_valid();
292 }
293
294 /*******************************************************************************
295 * The following function checks if Firmware update is needed,
296 * by checking if TOC in FIP image is valid or not.
297 ******************************************************************************/
bl1_plat_get_next_image_id(void)298 unsigned int bl1_plat_get_next_image_id(void)
299 {
300 return is_fwu_needed ? NS_BL1U_IMAGE_ID : BL2_IMAGE_ID;
301 }
302
/*
 * Use the default implementation of this function when Firmware Handoff is
 * disabled, to avoid duplicating its logic.
 */
305 #if TRANSFER_LIST
bl1_plat_handle_post_image_load(unsigned int image_id)306 int bl1_plat_handle_post_image_load(unsigned int image_id)
307 {
308 image_desc_t *image_desc __unused;
309
310 assert(image_id == BL2_IMAGE_ID);
311 struct transfer_list_entry *te;
312
313 /* Convey this information to BL2 via its TL. */
314 te = transfer_list_add(secure_tl, TL_TAG_SRAM_LAYOUT64,
315 sizeof(meminfo_t), NULL);
316 assert(te != NULL);
317
318 bl1_plat_calc_bl2_layout(&bl1_tzram_layout,
319 (meminfo_t *)transfer_list_entry_data(te));
320
321 transfer_list_update_checksum(secure_tl);
322
323 /**
324 * Before exiting make sure the contents of the TL are flushed in case there's no
325 * support for hardware cache coherency.
326 */
327 flush_dcache_range((uintptr_t)secure_tl, secure_tl->size);
328 return 0;
329 }
330 #endif /* TRANSFER_LIST*/
331