1 /**************************************************************************//**
2 * @file core_ca.h
3 * @brief CMSIS Cortex-A Core Peripheral Access Layer Header File
4 * @version V1.0.1
5 * @date 07. May 2018
6 ******************************************************************************/
7 /*
8 * Copyright (c) 2009-2017 ARM Limited. All rights reserved.
9 *
10 * SPDX-License-Identifier: Apache-2.0
11 *
12 * Licensed under the Apache License, Version 2.0 (the License); you may
13 * not use this file except in compliance with the License.
14 * You may obtain a copy of the License at
15 *
16 * www.apache.org/licenses/LICENSE-2.0
17 *
18 * Unless required by applicable law or agreed to in writing, software
19 * distributed under the License is distributed on an AS IS BASIS, WITHOUT
20 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
21 * See the License for the specific language governing permissions and
22 * limitations under the License.
23 */
24
25 #if defined ( __ICCARM__ )
26 #pragma system_include /* treat file as system include file for MISRA check */
27 #elif defined (__clang__)
28 #pragma clang system_header /* treat file as system include file */
29 #endif
30
31 #ifdef __cplusplus
32 extern "C" {
33 #endif
34
35 #ifndef __CORE_CA_H_GENERIC
36 #define __CORE_CA_H_GENERIC
37
38
39 /*******************************************************************************
40 * CMSIS definitions
41 ******************************************************************************/
42
43 /* CMSIS CA definitions */
44 #define __CA_CMSIS_VERSION_MAIN (1U) /*!< \brief [31:16] CMSIS-Core(A) main version */
45 #define __CA_CMSIS_VERSION_SUB (1U) /*!< \brief [15:0] CMSIS-Core(A) sub version */
46 #define __CA_CMSIS_VERSION ((__CA_CMSIS_VERSION_MAIN << 16U) | \
47 __CA_CMSIS_VERSION_SUB ) /*!< \brief CMSIS-Core(A) version number */
48
49 #if defined ( __CC_ARM )
50 #if defined __TARGET_FPU_VFP
51 #if (__FPU_PRESENT == 1)
52 #define __FPU_USED 1U
53 #else
54 #warning "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)"
55 #define __FPU_USED 0U
56 #endif
57 #else
58 #define __FPU_USED 0U
59 #endif
60
61 #elif defined (__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050)
62 #if defined __ARM_PCS_VFP
63 #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U)
64 #define __FPU_USED 1U
65 #else
66 #warning "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)"
67 #define __FPU_USED 0U
68 #endif
69 #else
70 #define __FPU_USED 0U
71 #endif
72
73 #elif defined ( __ICCARM__ )
74 #if defined __ARMVFP__
75 #if (__FPU_PRESENT == 1)
76 #define __FPU_USED 1U
77 #else
78 #warning "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)"
79 #define __FPU_USED 0U
80 #endif
81 #else
82 #define __FPU_USED 0U
83 #endif
84
85 #elif defined ( __TMS470__ )
86 #if defined __TI_VFP_SUPPORT__
87 #if (__FPU_PRESENT == 1)
88 #define __FPU_USED 1U
89 #else
90 #warning "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)"
91 #define __FPU_USED 0U
92 #endif
93 #else
94 #define __FPU_USED 0U
95 #endif
96
97 #elif defined ( __GNUC__ )
98 #if defined (__VFP_FP__) && !defined(__SOFTFP__)
99 #if (__FPU_PRESENT == 1)
100 #define __FPU_USED 1U
101 #else
102 #warning "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)"
103 #define __FPU_USED 0U
104 #endif
105 #else
106 #define __FPU_USED 0U
107 #endif
108
109 #elif defined ( __TASKING__ )
110 #if defined __FPU_VFP__
111 #if (__FPU_PRESENT == 1)
112 #define __FPU_USED 1U
113 #else
114 #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)"
115 #define __FPU_USED 0U
116 #endif
117 #else
118 #define __FPU_USED 0U
119 #endif
120 #endif
121
122 #include "cmsis_compiler.h" /* CMSIS compiler specific defines */
123
124 #ifdef __cplusplus
125 }
126 #endif
127
128 #endif /* __CORE_CA_H_GENERIC */
129
130 #ifndef __CMSIS_GENERIC
131
132 #ifndef __CORE_CA_H_DEPENDANT
133 #define __CORE_CA_H_DEPENDANT
134
135 #ifdef __cplusplus
136 extern "C" {
137 #endif
138
139 /* check device defines and use defaults */
140 #if defined __CHECK_DEVICE_DEFINES
141 #ifndef __CA_REV
142 #define __CA_REV 0x0000U
143 #warning "__CA_REV not defined in device header file; using default!"
144 #endif
145
146 #ifndef __FPU_PRESENT
147 #define __FPU_PRESENT 0U
148 #warning "__FPU_PRESENT not defined in device header file; using default!"
149 #endif
150
151 #ifndef __GIC_PRESENT
152 #define __GIC_PRESENT 1U
153 #warning "__GIC_PRESENT not defined in device header file; using default!"
154 #endif
155
156 #ifndef __TIM_PRESENT
157 #define __TIM_PRESENT 1U
158 #warning "__TIM_PRESENT not defined in device header file; using default!"
159 #endif
160
161 #ifndef __L2C_PRESENT
162 #define __L2C_PRESENT 0U
163 #warning "__L2C_PRESENT not defined in device header file; using default!"
164 #endif
165 #endif
166
167 /* IO definitions (access restrictions to peripheral registers) */
168 #ifdef __cplusplus
169 #define __I volatile /*!< \brief Defines 'read only' permissions */
170 #else
171 #define __I volatile const /*!< \brief Defines 'read only' permissions */
172 #endif
173 #define __O volatile /*!< \brief Defines 'write only' permissions */
174 #define __IO volatile /*!< \brief Defines 'read / write' permissions */
175
176 /* following defines should be used for structure members */
177 #define __IM volatile const /*!< \brief Defines 'read only' structure member permissions */
178 #define __OM volatile /*!< \brief Defines 'write only' structure member permissions */
179 #define __IOM volatile /*!< \brief Defines 'read / write' structure member permissions */
180 #define RESERVED(N, T) T RESERVED##N; // placeholder struct members used for "reserved" areas
181
182 /*******************************************************************************
183 * Register Abstraction
184    Core Registers contain:
185 - CPSR
186 - CP15 Registers
187 - L2C-310 Cache Controller
188 - Generic Interrupt Controller Distributor
189 - Generic Interrupt Controller Interface
190 ******************************************************************************/
191
192 /* Core Register CPSR */
193 typedef union
194 {
195 struct
196 {
197 uint32_t M:5; /*!< \brief bit: 0.. 4 Mode field */
198 uint32_t T:1; /*!< \brief bit: 5 Thumb execution state bit */
199 uint32_t F:1; /*!< \brief bit: 6 FIQ mask bit */
200 uint32_t I:1; /*!< \brief bit: 7 IRQ mask bit */
201 uint32_t A:1; /*!< \brief bit: 8 Asynchronous abort mask bit */
202 uint32_t E:1; /*!< \brief bit: 9 Endianness execution state bit */
203 uint32_t IT1:6; /*!< \brief bit: 10..15 If-Then execution state bits 2-7 */
204 uint32_t GE:4; /*!< \brief bit: 16..19 Greater than or Equal flags */
205 RESERVED(0:4, uint32_t)
206 uint32_t J:1; /*!< \brief bit: 24 Jazelle bit */
207 uint32_t IT0:2; /*!< \brief bit: 25..26 If-Then execution state bits 0-1 */
208 uint32_t Q:1; /*!< \brief bit: 27 Saturation condition flag */
209 uint32_t V:1; /*!< \brief bit: 28 Overflow condition code flag */
210 uint32_t C:1; /*!< \brief bit: 29 Carry condition code flag */
211 uint32_t Z:1; /*!< \brief bit: 30 Zero condition code flag */
212 uint32_t N:1; /*!< \brief bit: 31 Negative condition code flag */
213 } b; /*!< \brief Structure used for bit access */
214 uint32_t w; /*!< \brief Type used for word access */
215 } CPSR_Type;
216
217
218
219 /* CPSR Register Definitions */
220 #define CPSR_N_Pos 31U /*!< \brief CPSR: N Position */
221 #define CPSR_N_Msk (1UL << CPSR_N_Pos) /*!< \brief CPSR: N Mask */
222
223 #define CPSR_Z_Pos 30U /*!< \brief CPSR: Z Position */
224 #define CPSR_Z_Msk (1UL << CPSR_Z_Pos) /*!< \brief CPSR: Z Mask */
225
226 #define CPSR_C_Pos 29U /*!< \brief CPSR: C Position */
227 #define CPSR_C_Msk (1UL << CPSR_C_Pos) /*!< \brief CPSR: C Mask */
228
229 #define CPSR_V_Pos 28U /*!< \brief CPSR: V Position */
230 #define CPSR_V_Msk (1UL << CPSR_V_Pos) /*!< \brief CPSR: V Mask */
231
232 #define CPSR_Q_Pos 27U /*!< \brief CPSR: Q Position */
233 #define CPSR_Q_Msk (1UL << CPSR_Q_Pos) /*!< \brief CPSR: Q Mask */
234
235 #define CPSR_IT0_Pos 25U /*!< \brief CPSR: IT0 Position */
236 #define CPSR_IT0_Msk (3UL << CPSR_IT0_Pos) /*!< \brief CPSR: IT0 Mask */
237
238 #define CPSR_J_Pos 24U /*!< \brief CPSR: J Position */
239 #define CPSR_J_Msk (1UL << CPSR_J_Pos) /*!< \brief CPSR: J Mask */
240
241 #define CPSR_GE_Pos 16U /*!< \brief CPSR: GE Position */
242 #define CPSR_GE_Msk (0xFUL << CPSR_GE_Pos) /*!< \brief CPSR: GE Mask */
243
244 #define CPSR_IT1_Pos 10U /*!< \brief CPSR: IT1 Position */
245 #define CPSR_IT1_Msk (0x3FUL << CPSR_IT1_Pos) /*!< \brief CPSR: IT1 Mask */
246
247 #define CPSR_E_Pos 9U /*!< \brief CPSR: E Position */
248 #define CPSR_E_Msk (1UL << CPSR_E_Pos) /*!< \brief CPSR: E Mask */
249
250 #define CPSR_A_Pos 8U /*!< \brief CPSR: A Position */
251 #define CPSR_A_Msk (1UL << CPSR_A_Pos) /*!< \brief CPSR: A Mask */
252
253 #define CPSR_I_Pos 7U /*!< \brief CPSR: I Position */
254 #define CPSR_I_Msk (1UL << CPSR_I_Pos) /*!< \brief CPSR: I Mask */
255
256 #define CPSR_F_Pos 6U /*!< \brief CPSR: F Position */
257 #define CPSR_F_Msk (1UL << CPSR_F_Pos) /*!< \brief CPSR: F Mask */
258
259 #define CPSR_T_Pos 5U /*!< \brief CPSR: T Position */
260 #define CPSR_T_Msk (1UL << CPSR_T_Pos) /*!< \brief CPSR: T Mask */
261
262 #define CPSR_M_Pos 0U /*!< \brief CPSR: M Position */
263 #define CPSR_M_Msk (0x1FUL << CPSR_M_Pos) /*!< \brief CPSR: M Mask */
264
265 #define CPSR_M_USR 0x10U /*!< \brief CPSR: M User mode (PL0) */
266 #define CPSR_M_FIQ 0x11U /*!< \brief CPSR: M Fast Interrupt mode (PL1) */
267 #define CPSR_M_IRQ 0x12U /*!< \brief CPSR: M Interrupt mode (PL1) */
268 #define CPSR_M_SVC 0x13U /*!< \brief CPSR: M Supervisor mode (PL1) */
269 #define CPSR_M_MON 0x16U /*!< \brief CPSR: M Monitor mode (PL1) */
270 #define CPSR_M_ABT 0x17U /*!< \brief CPSR: M Abort mode (PL1) */
271 #define CPSR_M_HYP 0x1AU /*!< \brief CPSR: M Hypervisor mode (PL2) */
272 #define CPSR_M_UND 0x1BU /*!< \brief CPSR: M Undefined mode (PL1) */
273 #define CPSR_M_SYS 0x1FU /*!< \brief CPSR: M System mode (PL1) */
274
275 /* CP15 Register SCTLR */
276 typedef union
277 {
278 struct
279 {
280 uint32_t M:1; /*!< \brief bit: 0 MMU enable */
281 uint32_t A:1; /*!< \brief bit: 1 Alignment check enable */
282 uint32_t C:1; /*!< \brief bit: 2 Cache enable */
283 RESERVED(0:2, uint32_t)
284 uint32_t CP15BEN:1; /*!< \brief bit: 5 CP15 barrier enable */
285 RESERVED(1:1, uint32_t)
286 uint32_t B:1; /*!< \brief bit: 7 Endianness model */
287 RESERVED(2:2, uint32_t)
288 uint32_t SW:1; /*!< \brief bit: 10 SWP and SWPB enable */
289 uint32_t Z:1; /*!< \brief bit: 11 Branch prediction enable */
290 uint32_t I:1; /*!< \brief bit: 12 Instruction cache enable */
291 uint32_t V:1; /*!< \brief bit: 13 Vectors bit */
292 uint32_t RR:1; /*!< \brief bit: 14 Round Robin select */
293 RESERVED(3:2, uint32_t)
294 uint32_t HA:1; /*!< \brief bit: 17 Hardware Access flag enable */
295 RESERVED(4:1, uint32_t)
296 uint32_t WXN:1; /*!< \brief bit: 19 Write permission implies XN */
297 uint32_t UWXN:1; /*!< \brief bit: 20 Unprivileged write permission implies PL1 XN */
298 uint32_t FI:1; /*!< \brief bit: 21 Fast interrupts configuration enable */
299 uint32_t U:1; /*!< \brief bit: 22 Alignment model */
300 RESERVED(5:1, uint32_t)
301 uint32_t VE:1; /*!< \brief bit: 24 Interrupt Vectors Enable */
302 uint32_t EE:1; /*!< \brief bit: 25 Exception Endianness */
303 RESERVED(6:1, uint32_t)
304 uint32_t NMFI:1; /*!< \brief bit: 27 Non-maskable FIQ (NMFI) support */
305 uint32_t TRE:1; /*!< \brief bit: 28 TEX remap enable. */
306 uint32_t AFE:1; /*!< \brief bit: 29 Access flag enable */
307 uint32_t TE:1; /*!< \brief bit: 30 Thumb Exception enable */
308 RESERVED(7:1, uint32_t)
309 } b; /*!< \brief Structure used for bit access */
310 uint32_t w; /*!< \brief Type used for word access */
311 } SCTLR_Type;
312
313 #define SCTLR_TE_Pos 30U /*!< \brief SCTLR: TE Position */
314 #define SCTLR_TE_Msk (1UL << SCTLR_TE_Pos) /*!< \brief SCTLR: TE Mask */
315
316 #define SCTLR_AFE_Pos 29U /*!< \brief SCTLR: AFE Position */
317 #define SCTLR_AFE_Msk (1UL << SCTLR_AFE_Pos) /*!< \brief SCTLR: AFE Mask */
318
319 #define SCTLR_TRE_Pos 28U /*!< \brief SCTLR: TRE Position */
320 #define SCTLR_TRE_Msk (1UL << SCTLR_TRE_Pos) /*!< \brief SCTLR: TRE Mask */
321
322 #define SCTLR_NMFI_Pos 27U /*!< \brief SCTLR: NMFI Position */
323 #define SCTLR_NMFI_Msk (1UL << SCTLR_NMFI_Pos) /*!< \brief SCTLR: NMFI Mask */
324
325 #define SCTLR_EE_Pos 25U /*!< \brief SCTLR: EE Position */
326 #define SCTLR_EE_Msk (1UL << SCTLR_EE_Pos) /*!< \brief SCTLR: EE Mask */
327
328 #define SCTLR_VE_Pos 24U /*!< \brief SCTLR: VE Position */
329 #define SCTLR_VE_Msk (1UL << SCTLR_VE_Pos) /*!< \brief SCTLR: VE Mask */
330
331 #define SCTLR_U_Pos 22U /*!< \brief SCTLR: U Position */
332 #define SCTLR_U_Msk (1UL << SCTLR_U_Pos) /*!< \brief SCTLR: U Mask */
333
334 #define SCTLR_FI_Pos 21U /*!< \brief SCTLR: FI Position */
335 #define SCTLR_FI_Msk (1UL << SCTLR_FI_Pos) /*!< \brief SCTLR: FI Mask */
336
337 #define SCTLR_UWXN_Pos 20U /*!< \brief SCTLR: UWXN Position */
338 #define SCTLR_UWXN_Msk (1UL << SCTLR_UWXN_Pos) /*!< \brief SCTLR: UWXN Mask */
339
340 #define SCTLR_WXN_Pos 19U /*!< \brief SCTLR: WXN Position */
341 #define SCTLR_WXN_Msk (1UL << SCTLR_WXN_Pos) /*!< \brief SCTLR: WXN Mask */
342
343 #define SCTLR_HA_Pos 17U /*!< \brief SCTLR: HA Position */
344 #define SCTLR_HA_Msk (1UL << SCTLR_HA_Pos) /*!< \brief SCTLR: HA Mask */
345
346 #define SCTLR_RR_Pos 14U /*!< \brief SCTLR: RR Position */
347 #define SCTLR_RR_Msk (1UL << SCTLR_RR_Pos) /*!< \brief SCTLR: RR Mask */
348
349 #define SCTLR_V_Pos 13U /*!< \brief SCTLR: V Position */
350 #define SCTLR_V_Msk (1UL << SCTLR_V_Pos) /*!< \brief SCTLR: V Mask */
351
352 #define SCTLR_I_Pos 12U /*!< \brief SCTLR: I Position */
353 #define SCTLR_I_Msk (1UL << SCTLR_I_Pos) /*!< \brief SCTLR: I Mask */
354
355 #define SCTLR_Z_Pos 11U /*!< \brief SCTLR: Z Position */
356 #define SCTLR_Z_Msk (1UL << SCTLR_Z_Pos) /*!< \brief SCTLR: Z Mask */
357
358 #define SCTLR_SW_Pos 10U /*!< \brief SCTLR: SW Position */
359 #define SCTLR_SW_Msk (1UL << SCTLR_SW_Pos) /*!< \brief SCTLR: SW Mask */
360
361 #define SCTLR_B_Pos 7U /*!< \brief SCTLR: B Position */
362 #define SCTLR_B_Msk (1UL << SCTLR_B_Pos) /*!< \brief SCTLR: B Mask */
363
364 #define SCTLR_CP15BEN_Pos 5U /*!< \brief SCTLR: CP15BEN Position */
365 #define SCTLR_CP15BEN_Msk (1UL << SCTLR_CP15BEN_Pos) /*!< \brief SCTLR: CP15BEN Mask */
366
367 #define SCTLR_C_Pos 2U /*!< \brief SCTLR: C Position */
368 #define SCTLR_C_Msk (1UL << SCTLR_C_Pos) /*!< \brief SCTLR: C Mask */
369
370 #define SCTLR_A_Pos 1U /*!< \brief SCTLR: A Position */
371 #define SCTLR_A_Msk (1UL << SCTLR_A_Pos) /*!< \brief SCTLR: A Mask */
372
373 #define SCTLR_M_Pos 0U /*!< \brief SCTLR: M Position */
374 #define SCTLR_M_Msk (1UL << SCTLR_M_Pos) /*!< \brief SCTLR: M Mask */
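/** \brief Usage sketch (illustrative, not part of the CMSIS-Core(A) API): enable
    alignment fault checking by setting SCTLR.A, using the __get_SCTLR()/__set_SCTLR()
    CP15 accessors that the L1 cache helpers below also rely on.
    \code
    __set_SCTLR(__get_SCTLR() | SCTLR_A_Msk);   // set the A bit
    __ISB();                                    // ensure the new control setting takes effect
    \endcode
 */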
375
376 /* CP15 Register ACTLR */
377 typedef union
378 {
379 #if __CORTEX_A == 5 || defined(DOXYGEN)
380 /** \brief Structure used for bit access on Cortex-A5 */
381 struct
382 {
383 uint32_t FW:1; /*!< \brief bit: 0 Cache and TLB maintenance broadcast */
384 RESERVED(0:5, uint32_t)
385 uint32_t SMP:1; /*!< \brief bit: 6 Enables coherent requests to the processor */
386 uint32_t EXCL:1; /*!< \brief bit: 7 Exclusive L1/L2 cache control */
387 RESERVED(1:2, uint32_t)
388 uint32_t DODMBS:1; /*!< \brief bit: 10 Disable optimized data memory barrier behavior */
389 uint32_t DWBST:1; /*!< \brief bit: 11 AXI data write bursts to Normal memory */
390 uint32_t RADIS:1; /*!< \brief bit: 12 L1 Data Cache read-allocate mode disable */
391 uint32_t L1PCTL:2; /*!< \brief bit:13..14 L1 Data prefetch control */
392     uint32_t BP:2;                   /*!< \brief bit:15..16 Branch prediction policy */
393 uint32_t RSDIS:1; /*!< \brief bit: 17 Disable return stack operation */
394 uint32_t BTDIS:1; /*!< \brief bit: 18 Disable indirect Branch Target Address Cache (BTAC) */
395 RESERVED(3:9, uint32_t)
396 uint32_t DBDI:1; /*!< \brief bit: 28 Disable branch dual issue */
397 RESERVED(7:3, uint32_t)
398 } b;
399 #endif
400 #if __CORTEX_A == 7 || defined(DOXYGEN)
401 /** \brief Structure used for bit access on Cortex-A7 */
402 struct
403 {
404 RESERVED(0:6, uint32_t)
405 uint32_t SMP:1; /*!< \brief bit: 6 Enables coherent requests to the processor */
406 RESERVED(1:3, uint32_t)
407 uint32_t DODMBS:1; /*!< \brief bit: 10 Disable optimized data memory barrier behavior */
408 uint32_t L2RADIS:1; /*!< \brief bit: 11 L2 Data Cache read-allocate mode disable */
409 uint32_t L1RADIS:1; /*!< \brief bit: 12 L1 Data Cache read-allocate mode disable */
410 uint32_t L1PCTL:2; /*!< \brief bit:13..14 L1 Data prefetch control */
411 uint32_t DDVM:1; /*!< \brief bit: 15 Disable Distributed Virtual Memory (DVM) transactions */
412 RESERVED(3:12, uint32_t)
413 uint32_t DDI:1; /*!< \brief bit: 28 Disable dual issue */
414 RESERVED(7:3, uint32_t)
415 } b;
416 #endif
417 #if __CORTEX_A == 9 || defined(DOXYGEN)
418 /** \brief Structure used for bit access on Cortex-A9 */
419 struct
420 {
421 uint32_t FW:1; /*!< \brief bit: 0 Cache and TLB maintenance broadcast */
422 RESERVED(0:1, uint32_t)
423 uint32_t L1PE:1; /*!< \brief bit: 2 Dside prefetch */
424 uint32_t WFLZM:1; /*!< \brief bit: 3 Cache and TLB maintenance broadcast */
425 RESERVED(1:2, uint32_t)
426 uint32_t SMP:1; /*!< \brief bit: 6 Enables coherent requests to the processor */
427 uint32_t EXCL:1; /*!< \brief bit: 7 Exclusive L1/L2 cache control */
428 uint32_t AOW:1; /*!< \brief bit: 8 Enable allocation in one cache way only */
429 uint32_t PARITY:1; /*!< \brief bit: 9 Support for parity checking, if implemented */
430 RESERVED(7:22, uint32_t)
431 } b;
432 #endif
433 uint32_t w; /*!< \brief Type used for word access */
434 } ACTLR_Type;
435
436 #define ACTLR_DDI_Pos 28U /*!< \brief ACTLR: DDI Position */
437 #define ACTLR_DDI_Msk (1UL << ACTLR_DDI_Pos) /*!< \brief ACTLR: DDI Mask */
438
439 #define ACTLR_DBDI_Pos 28U /*!< \brief ACTLR: DBDI Position */
440 #define ACTLR_DBDI_Msk (1UL << ACTLR_DBDI_Pos) /*!< \brief ACTLR: DBDI Mask */
441
442 #define ACTLR_BTDIS_Pos 18U /*!< \brief ACTLR: BTDIS Position */
443 #define ACTLR_BTDIS_Msk (1UL << ACTLR_BTDIS_Pos) /*!< \brief ACTLR: BTDIS Mask */
444
445 #define ACTLR_RSDIS_Pos 17U /*!< \brief ACTLR: RSDIS Position */
446 #define ACTLR_RSDIS_Msk (1UL << ACTLR_RSDIS_Pos) /*!< \brief ACTLR: RSDIS Mask */
447
448 #define ACTLR_BP_Pos 15U /*!< \brief ACTLR: BP Position */
449 #define ACTLR_BP_Msk (3UL << ACTLR_BP_Pos) /*!< \brief ACTLR: BP Mask */
450
451 #define ACTLR_DDVM_Pos 15U /*!< \brief ACTLR: DDVM Position */
452 #define ACTLR_DDVM_Msk (1UL << ACTLR_DDVM_Pos) /*!< \brief ACTLR: DDVM Mask */
453
454 #define ACTLR_L1PCTL_Pos 13U /*!< \brief ACTLR: L1PCTL Position */
455 #define ACTLR_L1PCTL_Msk (3UL << ACTLR_L1PCTL_Pos) /*!< \brief ACTLR: L1PCTL Mask */
456
457 #define ACTLR_RADIS_Pos 12U /*!< \brief ACTLR: RADIS Position */
458 #define ACTLR_RADIS_Msk (1UL << ACTLR_RADIS_Pos) /*!< \brief ACTLR: RADIS Mask */
459
460 #define ACTLR_L1RADIS_Pos 12U /*!< \brief ACTLR: L1RADIS Position */
461 #define ACTLR_L1RADIS_Msk (1UL << ACTLR_L1RADIS_Pos) /*!< \brief ACTLR: L1RADIS Mask */
462
463 #define ACTLR_DWBST_Pos 11U /*!< \brief ACTLR: DWBST Position */
464 #define ACTLR_DWBST_Msk (1UL << ACTLR_DWBST_Pos) /*!< \brief ACTLR: DWBST Mask */
465
466 #define ACTLR_L2RADIS_Pos 11U /*!< \brief ACTLR: L2RADIS Position */
467 #define ACTLR_L2RADIS_Msk (1UL << ACTLR_L2RADIS_Pos) /*!< \brief ACTLR: L2RADIS Mask */
468
469 #define ACTLR_DODMBS_Pos 10U /*!< \brief ACTLR: DODMBS Position */
470 #define ACTLR_DODMBS_Msk (1UL << ACTLR_DODMBS_Pos) /*!< \brief ACTLR: DODMBS Mask */
471
472 #define ACTLR_PARITY_Pos 9U /*!< \brief ACTLR: PARITY Position */
473 #define ACTLR_PARITY_Msk (1UL << ACTLR_PARITY_Pos) /*!< \brief ACTLR: PARITY Mask */
474
475 #define ACTLR_AOW_Pos 8U /*!< \brief ACTLR: AOW Position */
476 #define ACTLR_AOW_Msk (1UL << ACTLR_AOW_Pos) /*!< \brief ACTLR: AOW Mask */
477
478 #define ACTLR_EXCL_Pos 7U /*!< \brief ACTLR: EXCL Position */
479 #define ACTLR_EXCL_Msk (1UL << ACTLR_EXCL_Pos) /*!< \brief ACTLR: EXCL Mask */
480
481 #define ACTLR_SMP_Pos 6U /*!< \brief ACTLR: SMP Position */
482 #define ACTLR_SMP_Msk (1UL << ACTLR_SMP_Pos) /*!< \brief ACTLR: SMP Mask */
483
484 #define ACTLR_WFLZM_Pos 3U /*!< \brief ACTLR: WFLZM Position */
485 #define ACTLR_WFLZM_Msk (1UL << ACTLR_WFLZM_Pos) /*!< \brief ACTLR: WFLZM Mask */
486
487 #define ACTLR_L1PE_Pos 2U /*!< \brief ACTLR: L1PE Position */
488 #define ACTLR_L1PE_Msk (1UL << ACTLR_L1PE_Pos) /*!< \brief ACTLR: L1PE Mask */
489
490 #define ACTLR_FW_Pos 0U /*!< \brief ACTLR: FW Position */
491 #define ACTLR_FW_Msk (1UL << ACTLR_FW_Pos) /*!< \brief ACTLR: FW Mask */
492
493 /* CP15 Register CPACR */
494 typedef union
495 {
496 struct
497 {
498 uint32_t CP0:2; /*!< \brief bit: 0..1 Access rights for coprocessor 0 */
499 uint32_t CP1:2; /*!< \brief bit: 2..3 Access rights for coprocessor 1 */
500 uint32_t CP2:2; /*!< \brief bit: 4..5 Access rights for coprocessor 2 */
501 uint32_t CP3:2; /*!< \brief bit: 6..7 Access rights for coprocessor 3 */
502 uint32_t CP4:2; /*!< \brief bit: 8..9 Access rights for coprocessor 4 */
503 uint32_t CP5:2; /*!< \brief bit:10..11 Access rights for coprocessor 5 */
504 uint32_t CP6:2; /*!< \brief bit:12..13 Access rights for coprocessor 6 */
505 uint32_t CP7:2; /*!< \brief bit:14..15 Access rights for coprocessor 7 */
506 uint32_t CP8:2; /*!< \brief bit:16..17 Access rights for coprocessor 8 */
507 uint32_t CP9:2; /*!< \brief bit:18..19 Access rights for coprocessor 9 */
508 uint32_t CP10:2; /*!< \brief bit:20..21 Access rights for coprocessor 10 */
509 uint32_t CP11:2; /*!< \brief bit:22..23 Access rights for coprocessor 11 */
510     uint32_t CP12:2;                 /*!< \brief bit:24..25 Access rights for coprocessor 12 */
511     uint32_t CP13:2;                 /*!< \brief bit:26..27 Access rights for coprocessor 13 */
512 uint32_t TRCDIS:1; /*!< \brief bit: 28 Disable CP14 access to trace registers */
513 RESERVED(0:1, uint32_t)
514 uint32_t D32DIS:1; /*!< \brief bit: 30 Disable use of registers D16-D31 of the VFP register file */
515 uint32_t ASEDIS:1; /*!< \brief bit: 31 Disable Advanced SIMD Functionality */
516 } b; /*!< \brief Structure used for bit access */
517 uint32_t w; /*!< \brief Type used for word access */
518 } CPACR_Type;
519
520 #define CPACR_ASEDIS_Pos 31U /*!< \brief CPACR: ASEDIS Position */
521 #define CPACR_ASEDIS_Msk (1UL << CPACR_ASEDIS_Pos) /*!< \brief CPACR: ASEDIS Mask */
522
523 #define CPACR_D32DIS_Pos 30U /*!< \brief CPACR: D32DIS Position */
524 #define CPACR_D32DIS_Msk (1UL << CPACR_D32DIS_Pos) /*!< \brief CPACR: D32DIS Mask */
525
526 #define CPACR_TRCDIS_Pos 28U /*!< \brief CPACR: TRCDIS Position */
527 #define CPACR_TRCDIS_Msk (1UL << CPACR_TRCDIS_Pos) /*!< \brief CPACR: TRCDIS Mask */
528
529 #define CPACR_CP_Pos_(n) (n*2U) /*!< \brief CPACR: CPn Position */
530 #define CPACR_CP_Msk_(n) (3UL << CPACR_CP_Pos_(n)) /*!< \brief CPACR: CPn Mask */
531
532 #define CPACR_CP_NA 0U /*!< \brief CPACR CPn field: Access denied. */
533 #define CPACR_CP_PL1 1U /*!< \brief CPACR CPn field: Accessible from PL1 only. */
534 #define CPACR_CP_FA 3U /*!< \brief CPACR CPn field: Full access. */
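/** \brief Usage sketch (illustrative; __get_CPACR()/__set_CPACR() are assumed to be
    provided by the CMSIS CP15 access layer): grant full access to coprocessors 10
    and 11 (VFP/Advanced SIMD) before enabling the FPU.
    \code
    uint32_t cpacr = __get_CPACR();
    cpacr &= ~(CPACR_CP_Msk_(10) | CPACR_CP_Msk_(11));                                  // clear current rights
    cpacr |=  (CPACR_CP_FA << CPACR_CP_Pos_(10)) | (CPACR_CP_FA << CPACR_CP_Pos_(11));  // full access
    __set_CPACR(cpacr);
    __ISB();
    \endcode
 */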
535
536 /* CP15 Register DFSR */
537 typedef union
538 {
539 struct
540 {
541 uint32_t FS0:4; /*!< \brief bit: 0.. 3 Fault Status bits bit 0-3 */
542 uint32_t Domain:4; /*!< \brief bit: 4.. 7 Fault on which domain */
543 RESERVED(0:1, uint32_t)
544 uint32_t LPAE:1; /*!< \brief bit: 9 Large Physical Address Extension */
545 uint32_t FS1:1; /*!< \brief bit: 10 Fault Status bits bit 4 */
546 uint32_t WnR:1; /*!< \brief bit: 11 Write not Read bit */
547 uint32_t ExT:1; /*!< \brief bit: 12 External abort type */
548 uint32_t CM:1; /*!< \brief bit: 13 Cache maintenance fault */
549 RESERVED(1:18, uint32_t)
550 } s; /*!< \brief Structure used for bit access in short format */
551 struct
552 {
553     uint32_t STATUS:6; /*!< \brief bit: 0.. 5 Fault Status bits */
554 RESERVED(0:3, uint32_t)
555 uint32_t LPAE:1; /*!< \brief bit: 9 Large Physical Address Extension */
556 RESERVED(1:1, uint32_t)
557 uint32_t WnR:1; /*!< \brief bit: 11 Write not Read bit */
558 uint32_t ExT:1; /*!< \brief bit: 12 External abort type */
559 uint32_t CM:1; /*!< \brief bit: 13 Cache maintenance fault */
560 RESERVED(2:18, uint32_t)
561 } l; /*!< \brief Structure used for bit access in long format */
562 uint32_t w; /*!< \brief Type used for word access */
563 } DFSR_Type;
564
565 #define DFSR_CM_Pos 13U /*!< \brief DFSR: CM Position */
566 #define DFSR_CM_Msk (1UL << DFSR_CM_Pos) /*!< \brief DFSR: CM Mask */
567
568 #define DFSR_Ext_Pos 12U /*!< \brief DFSR: Ext Position */
569 #define DFSR_Ext_Msk (1UL << DFSR_Ext_Pos) /*!< \brief DFSR: Ext Mask */
570
571 #define DFSR_WnR_Pos 11U /*!< \brief DFSR: WnR Position */
572 #define DFSR_WnR_Msk (1UL << DFSR_WnR_Pos) /*!< \brief DFSR: WnR Mask */
573
574 #define DFSR_FS1_Pos 10U /*!< \brief DFSR: FS1 Position */
575 #define DFSR_FS1_Msk (1UL << DFSR_FS1_Pos) /*!< \brief DFSR: FS1 Mask */
576
577 #define DFSR_LPAE_Pos 9U /*!< \brief DFSR: LPAE Position */
578 #define DFSR_LPAE_Msk (1UL << DFSR_LPAE_Pos) /*!< \brief DFSR: LPAE Mask */
579
580 #define DFSR_Domain_Pos 4U /*!< \brief DFSR: Domain Position */
581 #define DFSR_Domain_Msk (0xFUL << DFSR_Domain_Pos) /*!< \brief DFSR: Domain Mask */
582
583 #define DFSR_FS0_Pos 0U /*!< \brief DFSR: FS0 Position */
584 #define DFSR_FS0_Msk (0xFUL << DFSR_FS0_Pos) /*!< \brief DFSR: FS0 Mask */
585
586 #define DFSR_STATUS_Pos 0U /*!< \brief DFSR: STATUS Position */
587 #define DFSR_STATUS_Msk (0x3FUL << DFSR_STATUS_Pos) /*!< \brief DFSR: STATUS Mask */
588
589 /* CP15 Register IFSR */
590 typedef union
591 {
592 struct
593 {
594 uint32_t FS0:4; /*!< \brief bit: 0.. 3 Fault Status bits bit 0-3 */
595 RESERVED(0:5, uint32_t)
596 uint32_t LPAE:1; /*!< \brief bit: 9 Large Physical Address Extension */
597 uint32_t FS1:1; /*!< \brief bit: 10 Fault Status bits bit 4 */
598 RESERVED(1:1, uint32_t)
599 uint32_t ExT:1; /*!< \brief bit: 12 External abort type */
600 RESERVED(2:19, uint32_t)
601 } s; /*!< \brief Structure used for bit access in short format */
602 struct
603 {
604 uint32_t STATUS:6; /*!< \brief bit: 0.. 5 Fault Status bits */
605 RESERVED(0:3, uint32_t)
606 uint32_t LPAE:1; /*!< \brief bit: 9 Large Physical Address Extension */
607 RESERVED(1:2, uint32_t)
608 uint32_t ExT:1; /*!< \brief bit: 12 External abort type */
609 RESERVED(2:19, uint32_t)
610 } l; /*!< \brief Structure used for bit access in long format */
611 uint32_t w; /*!< \brief Type used for word access */
612 } IFSR_Type;
613
614 #define IFSR_ExT_Pos 12U /*!< \brief IFSR: ExT Position */
615 #define IFSR_ExT_Msk (1UL << IFSR_ExT_Pos) /*!< \brief IFSR: ExT Mask */
616
617 #define IFSR_FS1_Pos 10U /*!< \brief IFSR: FS1 Position */
618 #define IFSR_FS1_Msk (1UL << IFSR_FS1_Pos) /*!< \brief IFSR: FS1 Mask */
619
620 #define IFSR_LPAE_Pos 9U /*!< \brief IFSR: LPAE Position */
621 #define IFSR_LPAE_Msk (0x1UL << IFSR_LPAE_Pos) /*!< \brief IFSR: LPAE Mask */
622
623 #define IFSR_FS0_Pos 0U /*!< \brief IFSR: FS0 Position */
624 #define IFSR_FS0_Msk (0xFUL << IFSR_FS0_Pos) /*!< \brief IFSR: FS0 Mask */
625
626 #define IFSR_STATUS_Pos 0U /*!< \brief IFSR: STATUS Position */
627 #define IFSR_STATUS_Msk (0x3FUL << IFSR_STATUS_Pos) /*!< \brief IFSR: STATUS Mask */
628
629 /* CP15 Register ISR */
630 typedef union
631 {
632 struct
633 {
634 RESERVED(0:6, uint32_t)
635 uint32_t F:1; /*!< \brief bit: 6 FIQ pending bit */
636 uint32_t I:1; /*!< \brief bit: 7 IRQ pending bit */
637 uint32_t A:1; /*!< \brief bit: 8 External abort pending bit */
638 RESERVED(1:23, uint32_t)
639 } b; /*!< \brief Structure used for bit access */
640 uint32_t w; /*!< \brief Type used for word access */
641 } ISR_Type;
642
643 #define ISR_A_Pos 8U /*!< \brief ISR: A Position */
644 #define ISR_A_Msk (1UL << ISR_A_Pos) /*!< \brief ISR: A Mask */
645
646 #define ISR_I_Pos 7U /*!< \brief ISR: I Position */
647 #define ISR_I_Msk (1UL << ISR_I_Pos) /*!< \brief ISR: I Mask */
648
649 #define ISR_F_Pos 6U /*!< \brief ISR: F Position */
650 #define ISR_F_Msk (1UL << ISR_F_Pos) /*!< \brief ISR: F Mask */
651
652 /* DACR Register */
653 #define DACR_D_Pos_(n) (2U*n) /*!< \brief DACR: Dn Position */
654 #define DACR_D_Msk_(n) (3UL << DACR_D_Pos_(n)) /*!< \brief DACR: Dn Mask */
655 #define DACR_Dn_NOACCESS 0U /*!< \brief DACR Dn field: No access */
656 #define DACR_Dn_CLIENT 1U /*!< \brief DACR Dn field: Client */
657 #define DACR_Dn_MANAGER 3U /*!< \brief DACR Dn field: Manager */
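/** \brief Usage sketch (illustrative; a __set_DACR() CP15 accessor is assumed):
    configure domain 0 as "Client" so MMU access permissions are checked, leaving
    all other domains without access.
    \code
    __set_DACR((uint32_t)DACR_Dn_CLIENT << DACR_D_Pos_(0));
    \endcode
 */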
658
659 /**
660 \brief Mask and shift a bit field value for use in a register bit range.
661 \param [in] field Name of the register bit field.
662    \param [in] value Value of the bit field. This parameter is interpreted as a uint32_t type.
663 \return Masked and shifted value.
664 */
665 #define _VAL2FLD(field, value) (((uint32_t)(value) << field ## _Pos) & field ## _Msk)
666
667 /**
668    \brief Mask and shift a register value to extract a bit field value.
669 \param [in] field Name of the register bit field.
670    \param [in] value Value of the register. This parameter is interpreted as a uint32_t type.
671 \return Masked and shifted bit field value.
672 */
673 #define _FLD2VAL(field, value) (((uint32_t)(value) & field ## _Msk) >> field ## _Pos)
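/** \brief Usage sketch for the field macros above (illustrative): extract the mode
    field from the current CPSR. __get_CPSR() is assumed to be provided by the
    compiler-specific CMSIS headers.
    \code
    uint32_t mode = _FLD2VAL(CPSR_M, __get_CPSR());
    if (mode == CPSR_M_SVC) {
      // executing in Supervisor mode (PL1)
    }
    \endcode
 */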
674
675
676 /**
677 \brief Union type to access the L2C_310 Cache Controller.
678 */
679 #if (__L2C_PRESENT == 1U) || defined(DOXYGEN)
680 typedef struct
681 {
682 __IM uint32_t CACHE_ID; /*!< \brief Offset: 0x0000 (R/ ) Cache ID Register */
683 __IM uint32_t CACHE_TYPE; /*!< \brief Offset: 0x0004 (R/ ) Cache Type Register */
684 RESERVED(0[0x3e], uint32_t)
685 __IOM uint32_t CONTROL; /*!< \brief Offset: 0x0100 (R/W) Control Register */
686 __IOM uint32_t AUX_CNT; /*!< \brief Offset: 0x0104 (R/W) Auxiliary Control */
687 RESERVED(1[0x3e], uint32_t)
688 __IOM uint32_t EVENT_CONTROL; /*!< \brief Offset: 0x0200 (R/W) Event Counter Control */
689 __IOM uint32_t EVENT_COUNTER1_CONF; /*!< \brief Offset: 0x0204 (R/W) Event Counter 1 Configuration */
690   __IOM uint32_t EVENT_COUNTER0_CONF;      /*!< \brief Offset: 0x0208 (R/W) Event Counter 0 Configuration */
691 RESERVED(2[0x2], uint32_t)
692 __IOM uint32_t INTERRUPT_MASK; /*!< \brief Offset: 0x0214 (R/W) Interrupt Mask */
693 __IM uint32_t MASKED_INT_STATUS; /*!< \brief Offset: 0x0218 (R/ ) Masked Interrupt Status */
694 __IM uint32_t RAW_INT_STATUS; /*!< \brief Offset: 0x021c (R/ ) Raw Interrupt Status */
695 __OM uint32_t INTERRUPT_CLEAR; /*!< \brief Offset: 0x0220 ( /W) Interrupt Clear */
696 RESERVED(3[0x143], uint32_t)
697 __IOM uint32_t CACHE_SYNC; /*!< \brief Offset: 0x0730 (R/W) Cache Sync */
698 RESERVED(4[0xf], uint32_t)
699 __IOM uint32_t INV_LINE_PA; /*!< \brief Offset: 0x0770 (R/W) Invalidate Line By PA */
700 RESERVED(6[2], uint32_t)
701 __IOM uint32_t INV_WAY; /*!< \brief Offset: 0x077c (R/W) Invalidate by Way */
702 RESERVED(5[0xc], uint32_t)
703 __IOM uint32_t CLEAN_LINE_PA; /*!< \brief Offset: 0x07b0 (R/W) Clean Line by PA */
704 RESERVED(7[1], uint32_t)
705 __IOM uint32_t CLEAN_LINE_INDEX_WAY; /*!< \brief Offset: 0x07b8 (R/W) Clean Line by Index/Way */
706 __IOM uint32_t CLEAN_WAY; /*!< \brief Offset: 0x07bc (R/W) Clean by Way */
707 RESERVED(8[0xc], uint32_t)
708 __IOM uint32_t CLEAN_INV_LINE_PA; /*!< \brief Offset: 0x07f0 (R/W) Clean and Invalidate Line by PA */
709 RESERVED(9[1], uint32_t)
710 __IOM uint32_t CLEAN_INV_LINE_INDEX_WAY; /*!< \brief Offset: 0x07f8 (R/W) Clean and Invalidate Line by Index/Way */
711 __IOM uint32_t CLEAN_INV_WAY; /*!< \brief Offset: 0x07fc (R/W) Clean and Invalidate by Way */
712 RESERVED(10[0x40], uint32_t)
713 __IOM uint32_t DATA_LOCK_0_WAY; /*!< \brief Offset: 0x0900 (R/W) Data Lockdown 0 by Way */
714 __IOM uint32_t INST_LOCK_0_WAY; /*!< \brief Offset: 0x0904 (R/W) Instruction Lockdown 0 by Way */
715 __IOM uint32_t DATA_LOCK_1_WAY; /*!< \brief Offset: 0x0908 (R/W) Data Lockdown 1 by Way */
716 __IOM uint32_t INST_LOCK_1_WAY; /*!< \brief Offset: 0x090c (R/W) Instruction Lockdown 1 by Way */
717 __IOM uint32_t DATA_LOCK_2_WAY; /*!< \brief Offset: 0x0910 (R/W) Data Lockdown 2 by Way */
718 __IOM uint32_t INST_LOCK_2_WAY; /*!< \brief Offset: 0x0914 (R/W) Instruction Lockdown 2 by Way */
719 __IOM uint32_t DATA_LOCK_3_WAY; /*!< \brief Offset: 0x0918 (R/W) Data Lockdown 3 by Way */
720 __IOM uint32_t INST_LOCK_3_WAY; /*!< \brief Offset: 0x091c (R/W) Instruction Lockdown 3 by Way */
721 __IOM uint32_t DATA_LOCK_4_WAY; /*!< \brief Offset: 0x0920 (R/W) Data Lockdown 4 by Way */
722 __IOM uint32_t INST_LOCK_4_WAY; /*!< \brief Offset: 0x0924 (R/W) Instruction Lockdown 4 by Way */
723 __IOM uint32_t DATA_LOCK_5_WAY; /*!< \brief Offset: 0x0928 (R/W) Data Lockdown 5 by Way */
724 __IOM uint32_t INST_LOCK_5_WAY; /*!< \brief Offset: 0x092c (R/W) Instruction Lockdown 5 by Way */
725   __IOM uint32_t DATA_LOCK_6_WAY;          /*!< \brief Offset: 0x0930 (R/W) Data Lockdown 6 by Way */
726   __IOM uint32_t INST_LOCK_6_WAY;          /*!< \brief Offset: 0x0934 (R/W) Instruction Lockdown 6 by Way */
727   __IOM uint32_t DATA_LOCK_7_WAY;          /*!< \brief Offset: 0x0938 (R/W) Data Lockdown 7 by Way */
728   __IOM uint32_t INST_LOCK_7_WAY;          /*!< \brief Offset: 0x093c (R/W) Instruction Lockdown 7 by Way */
729 RESERVED(11[0x4], uint32_t)
730 __IOM uint32_t LOCK_LINE_EN; /*!< \brief Offset: 0x0950 (R/W) Lockdown by Line Enable */
731 __IOM uint32_t UNLOCK_ALL_BY_WAY; /*!< \brief Offset: 0x0954 (R/W) Unlock All Lines by Way */
732 RESERVED(12[0xaa], uint32_t)
733 __IOM uint32_t ADDRESS_FILTER_START; /*!< \brief Offset: 0x0c00 (R/W) Address Filtering Start */
734 __IOM uint32_t ADDRESS_FILTER_END; /*!< \brief Offset: 0x0c04 (R/W) Address Filtering End */
735 RESERVED(13[0xce], uint32_t)
736 __IOM uint32_t DEBUG_CONTROL; /*!< \brief Offset: 0x0f40 (R/W) Debug Control Register */
737 } L2C_310_TypeDef;
738
739 #define L2C_310 ((L2C_310_TypeDef *)L2C_310_BASE) /*!< \brief L2C_310 register set access pointer */
740 #endif
741
742 #if (__GIC_PRESENT == 1U) || defined(DOXYGEN)
743
744 /** \brief Structure type to access the Generic Interrupt Controller Distributor (GICD)
745 */
746 typedef struct
747 {
748 __IOM uint32_t CTLR; /*!< \brief Offset: 0x000 (R/W) Distributor Control Register */
749 __IM uint32_t TYPER; /*!< \brief Offset: 0x004 (R/ ) Interrupt Controller Type Register */
750 __IM uint32_t IIDR; /*!< \brief Offset: 0x008 (R/ ) Distributor Implementer Identification Register */
751 RESERVED(0, uint32_t)
752 __IOM uint32_t STATUSR; /*!< \brief Offset: 0x010 (R/W) Error Reporting Status Register, optional */
753 RESERVED(1[11], uint32_t)
754 __OM uint32_t SETSPI_NSR; /*!< \brief Offset: 0x040 ( /W) Set SPI Register */
755 RESERVED(2, uint32_t)
756 __OM uint32_t CLRSPI_NSR; /*!< \brief Offset: 0x048 ( /W) Clear SPI Register */
757 RESERVED(3, uint32_t)
758 __OM uint32_t SETSPI_SR; /*!< \brief Offset: 0x050 ( /W) Set SPI, Secure Register */
759 RESERVED(4, uint32_t)
760 __OM uint32_t CLRSPI_SR; /*!< \brief Offset: 0x058 ( /W) Clear SPI, Secure Register */
761 RESERVED(5[9], uint32_t)
762 __IOM uint32_t IGROUPR[32]; /*!< \brief Offset: 0x080 (R/W) Interrupt Group Registers */
763 __IOM uint32_t ISENABLER[32]; /*!< \brief Offset: 0x100 (R/W) Interrupt Set-Enable Registers */
764 __IOM uint32_t ICENABLER[32]; /*!< \brief Offset: 0x180 (R/W) Interrupt Clear-Enable Registers */
765 __IOM uint32_t ISPENDR[32]; /*!< \brief Offset: 0x200 (R/W) Interrupt Set-Pending Registers */
766 __IOM uint32_t ICPENDR[32]; /*!< \brief Offset: 0x280 (R/W) Interrupt Clear-Pending Registers */
767 __IOM uint32_t ISACTIVER[32]; /*!< \brief Offset: 0x300 (R/W) Interrupt Set-Active Registers */
768 __IOM uint32_t ICACTIVER[32]; /*!< \brief Offset: 0x380 (R/W) Interrupt Clear-Active Registers */
769 __IOM uint32_t IPRIORITYR[255]; /*!< \brief Offset: 0x400 (R/W) Interrupt Priority Registers */
770 RESERVED(6, uint32_t)
771 __IOM uint32_t ITARGETSR[255]; /*!< \brief Offset: 0x800 (R/W) Interrupt Targets Registers */
772 RESERVED(7, uint32_t)
773 __IOM uint32_t ICFGR[64]; /*!< \brief Offset: 0xC00 (R/W) Interrupt Configuration Registers */
774 __IOM uint32_t IGRPMODR[32]; /*!< \brief Offset: 0xD00 (R/W) Interrupt Group Modifier Registers */
775 RESERVED(8[32], uint32_t)
776 __IOM uint32_t NSACR[64]; /*!< \brief Offset: 0xE00 (R/W) Non-secure Access Control Registers */
777 __OM uint32_t SGIR; /*!< \brief Offset: 0xF00 ( /W) Software Generated Interrupt Register */
778 RESERVED(9[3], uint32_t)
779 __IOM uint32_t CPENDSGIR[4]; /*!< \brief Offset: 0xF10 (R/W) SGI Clear-Pending Registers */
780 __IOM uint32_t SPENDSGIR[4]; /*!< \brief Offset: 0xF20 (R/W) SGI Set-Pending Registers */
781 RESERVED(10[5236], uint32_t)
782 __IOM uint64_t IROUTER[988]; /*!< \brief Offset: 0x6100(R/W) Interrupt Routing Registers */
783 } GICDistributor_Type;
784
785 #define GICDistributor ((GICDistributor_Type *) GIC_DISTRIBUTOR_BASE ) /*!< \brief GIC Distributor register set access pointer */
786
787 /** \brief Structure type to access the Generic Interrupt Controller Interface (GICC)
788 */
789 typedef struct
790 {
791 __IOM uint32_t CTLR; /*!< \brief Offset: 0x000 (R/W) CPU Interface Control Register */
792 __IOM uint32_t PMR; /*!< \brief Offset: 0x004 (R/W) Interrupt Priority Mask Register */
793 __IOM uint32_t BPR; /*!< \brief Offset: 0x008 (R/W) Binary Point Register */
794 __IM uint32_t IAR; /*!< \brief Offset: 0x00C (R/ ) Interrupt Acknowledge Register */
795 __OM uint32_t EOIR; /*!< \brief Offset: 0x010 ( /W) End Of Interrupt Register */
796 __IM uint32_t RPR; /*!< \brief Offset: 0x014 (R/ ) Running Priority Register */
797 __IM uint32_t HPPIR; /*!< \brief Offset: 0x018 (R/ ) Highest Priority Pending Interrupt Register */
798 __IOM uint32_t ABPR; /*!< \brief Offset: 0x01C (R/W) Aliased Binary Point Register */
799 __IM uint32_t AIAR; /*!< \brief Offset: 0x020 (R/ ) Aliased Interrupt Acknowledge Register */
800 __OM uint32_t AEOIR; /*!< \brief Offset: 0x024 ( /W) Aliased End Of Interrupt Register */
801 __IM uint32_t AHPPIR; /*!< \brief Offset: 0x028 (R/ ) Aliased Highest Priority Pending Interrupt Register */
802 __IOM uint32_t STATUSR; /*!< \brief Offset: 0x02C (R/W) Error Reporting Status Register, optional */
803 RESERVED(1[40], uint32_t)
804 __IOM uint32_t APR[4]; /*!< \brief Offset: 0x0D0 (R/W) Active Priority Register */
805 __IOM uint32_t NSAPR[4]; /*!< \brief Offset: 0x0E0 (R/W) Non-secure Active Priority Register */
806 RESERVED(2[3], uint32_t)
807 __IM uint32_t IIDR; /*!< \brief Offset: 0x0FC (R/ ) CPU Interface Identification Register */
808 RESERVED(3[960], uint32_t)
809 __OM uint32_t DIR; /*!< \brief Offset: 0x1000( /W) Deactivate Interrupt Register */
810 } GICInterface_Type;
811
812 #define GICInterface ((GICInterface_Type *) GIC_INTERFACE_BASE ) /*!< \brief GIC Interface register set access pointer */
813 #endif
814
815 #if (__TIM_PRESENT == 1U) || defined(DOXYGEN)
816 #if ((__CORTEX_A == 5U) || (__CORTEX_A == 9U)) || defined(DOXYGEN)
817 /** \brief Structure type to access the Private Timer
818 */
819 typedef struct
820 {
821 __IOM uint32_t LOAD; //!< \brief Offset: 0x000 (R/W) Private Timer Load Register
822 __IOM uint32_t COUNTER; //!< \brief Offset: 0x004 (R/W) Private Timer Counter Register
823 __IOM uint32_t CONTROL; //!< \brief Offset: 0x008 (R/W) Private Timer Control Register
824 __IOM uint32_t ISR; //!< \brief Offset: 0x00C (R/W) Private Timer Interrupt Status Register
825 RESERVED(0[4], uint32_t)
826 __IOM uint32_t WLOAD; //!< \brief Offset: 0x020 (R/W) Watchdog Load Register
827 __IOM uint32_t WCOUNTER; //!< \brief Offset: 0x024 (R/W) Watchdog Counter Register
828 __IOM uint32_t WCONTROL; //!< \brief Offset: 0x028 (R/W) Watchdog Control Register
829 __IOM uint32_t WISR; //!< \brief Offset: 0x02C (R/W) Watchdog Interrupt Status Register
830 __IOM uint32_t WRESET; //!< \brief Offset: 0x030 (R/W) Watchdog Reset Status Register
831 __OM uint32_t WDISABLE; //!< \brief Offset: 0x034 ( /W) Watchdog Disable Register
832 } Timer_Type;
833 #define PTIM ((Timer_Type *) TIMER_BASE ) /*!< \brief Timer register struct */
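/** \brief Usage sketch (illustrative): start the private timer as a periodic
    down-counter. The CONTROL bit assignments (bit 0 enable, bit 1 auto-reload,
    bit 2 IRQ enable) follow the MPCore TRM and are not encoded in this header,
    so treat them as an assumption here.
    \code
    PTIM->LOAD    = 0x000FFFFFU;                        // reload value
    PTIM->CONTROL = (1U << 2) | (1U << 1) | (1U << 0);  // IRQ enable, auto-reload, timer enable
    \endcode
 */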
834 #endif
835 #endif
836
837 /*******************************************************************************
838 * Hardware Abstraction Layer
839 Core Function Interface contains:
840 - L1 Cache Functions
841 - L2C-310 Cache Controller Functions
842 - PL1 Timer Functions
843 - GIC Functions
844 - MMU Functions
845 ******************************************************************************/
846
847 /* ########################## L1 Cache functions ################################# */
848
849 /** \brief Enable Caches by setting I and C bits in SCTLR register.
850 */
851 __STATIC_FORCEINLINE void L1C_EnableCaches(void) {
852 __set_SCTLR( __get_SCTLR() | SCTLR_I_Msk | SCTLR_C_Msk);
853 __ISB();
854 }
855
856 /** \brief Disable Caches by clearing I and C bits in SCTLR register.
857 */
858 __STATIC_FORCEINLINE void L1C_DisableCaches(void) {
859 __set_SCTLR( __get_SCTLR() & (~SCTLR_I_Msk) & (~SCTLR_C_Msk));
860 __ISB();
861 }
862
863 /** \brief Enable Branch Prediction by setting Z bit in SCTLR register.
864 */
865 __STATIC_FORCEINLINE void L1C_EnableBTAC(void) {
866 __set_SCTLR( __get_SCTLR() | SCTLR_Z_Msk);
867 __ISB();
868 }
869
870 /** \brief Disable Branch Prediction by clearing Z bit in SCTLR register.
871 */
872 __STATIC_FORCEINLINE void L1C_DisableBTAC(void) {
873 __set_SCTLR( __get_SCTLR() & (~SCTLR_Z_Msk));
874 __ISB();
875 }
876
877 /** \brief Invalidate entire branch predictor array
878 */
879 __STATIC_FORCEINLINE void L1C_InvalidateBTAC(void) {
880 __set_BPIALL(0);
881 __DSB(); //ensure completion of the invalidation
882 __ISB(); //ensure instruction fetch path sees new state
883 }
884
885 /** \brief Invalidate the whole instruction cache
886 */
887 __STATIC_FORCEINLINE void L1C_InvalidateICacheAll(void) {
888 __set_ICIALLU(0);
889 __DSB(); //ensure completion of the invalidation
890 __ISB(); //ensure instruction fetch path sees new I cache state
891 }
892
893 /** \brief Clean data cache line by address.
894 * \param [in] va Pointer to data to clean the cache for.
895 */
896 __STATIC_FORCEINLINE void L1C_CleanDCacheMVA(void *va) {
897 __set_DCCMVAC((uint32_t)va);
898 __DMB(); //ensure the ordering of data cache maintenance operations and their effects
899 }
900
901 /** \brief Invalidate data cache line by address.
902 * \param [in] va Pointer to data to invalidate the cache for.
903 */
904 __STATIC_FORCEINLINE void L1C_InvalidateDCacheMVA(void *va) {
905 __set_DCIMVAC((uint32_t)va);
906 __DMB(); //ensure the ordering of data cache maintenance operations and their effects
907 }
908
909 /** \brief Clean and Invalidate data cache by address.
910 * \param [in] va Pointer to data to clean and invalidate the cache for.
911 */
912 __STATIC_FORCEINLINE void L1C_CleanInvalidateDCacheMVA(void *va) {
913 __set_DCCIMVAC((uint32_t)va);
914 __DMB(); //ensure the ordering of data cache maintenance operations and their effects
915 }
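/** \brief Usage sketch (illustrative): clean a buffer from the L1 data cache by
    iterating over cache lines. buf and size are placeholders for the buffer start
    and length; the 32-byte line size is an assumption and should be derived from
    CCSIDR on the target core.
    \code
    uint32_t addr = (uint32_t)buf & ~0x1FU;              // align down to the assumed line size
    for (; addr < ((uint32_t)buf + size); addr += 32U) {
      L1C_CleanDCacheMVA((void *)addr);
    }
    __DSB();                                             // wait for the maintenance to complete
    \endcode
 */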
916
917 /** \brief Calculate log2 rounded up
918 * - log(0) => 0
919 * - log(1) => 0
920 * - log(2) => 1
921 * - log(3) => 2
922 * - log(4) => 2
923 * - log(5) => 3
924 * : :
925 * - log(16) => 4
926 * - log(32) => 5
927 * : :
928 * \param [in] n input value parameter
929 * \return log2(n)
930 */
931 __STATIC_FORCEINLINE uint8_t __log2_up(uint32_t n)
932 {
933 if (n < 2U) {
934 return 0U;
935 }
936 uint8_t log = 0U;
937 uint32_t t = n;
938 while(t > 1U)
939 {
940 log++;
941 t >>= 1U;
942 }
943   if ((n & (n - 1U)) != 0U) { log++; }   /* round up when n is not a power of two */
944 return log;
945 }
946
947 /** \brief Apply cache maintenance to given cache level.
948 * \param [in] level cache level to be maintained
949 * \param [in] maint 0 - invalidate, 1 - clean, otherwise - invalidate and clean
950 */
951 __STATIC_FORCEINLINE void __L1C_MaintainDCacheSetWay(uint32_t level, uint32_t maint)
952 {
953 uint32_t Dummy;
954 uint32_t ccsidr;
955 uint32_t num_sets;
956 uint32_t num_ways;
957 uint32_t shift_way;
958 uint32_t log2_linesize;
959 int32_t log2_num_ways;
960
961 Dummy = level << 1U;
962 /* set csselr, select ccsidr register */
963 __set_CSSELR(Dummy);
964 /* get current ccsidr register */
965 ccsidr = __get_CCSIDR();
966 num_sets = ((ccsidr & 0x0FFFE000U) >> 13U) + 1U;
967 num_ways = ((ccsidr & 0x00001FF8U) >> 3U) + 1U;
968 log2_linesize = (ccsidr & 0x00000007U) + 2U + 2U;
969 log2_num_ways = __log2_up(num_ways);
970 if ((log2_num_ways < 0) || (log2_num_ways > 32)) {
971 return; // FATAL ERROR
972 }
973 shift_way = 32U - (uint32_t)log2_num_ways;
974 for(int32_t way = num_ways-1; way >= 0; way--)
975 {
976 for(int32_t set = num_sets-1; set >= 0; set--)
977 {
978 Dummy = (level << 1U) | (((uint32_t)set) << log2_linesize) | (((uint32_t)way) << shift_way);
979 switch (maint)
980 {
981 case 0U: __set_DCISW(Dummy); break;
982 case 1U: __set_DCCSW(Dummy); break;
983 default: __set_DCCISW(Dummy); break;
984 }
985 }
986 }
987 __DMB();
988 }
989
990 /** \brief Clean and Invalidate the entire data or unified cache
991 * Generic mechanism for cleaning/invalidating the entire data or unified cache to the point of coherency
992 * \param [in] op 0 - invalidate, 1 - clean, otherwise - invalidate and clean
993 */
994 __STATIC_FORCEINLINE void L1C_CleanInvalidateCache(uint32_t op) {
995 uint32_t clidr;
996 uint32_t cache_type;
997 clidr = __get_CLIDR();
998 for(uint32_t i = 0U; i<7U; i++)
999 {
1000 cache_type = (clidr >> i*3U) & 0x7UL;
1001 if ((cache_type >= 2U) && (cache_type <= 4U))
1002 {
1003 __L1C_MaintainDCacheSetWay(i, op);
1004 }
1005 }
1006 }
1007
1008 /** \brief Clean and Invalidate the entire data or unified cache
1009 * Generic mechanism for cleaning/invalidating the entire data or unified cache to the point of coherency
1010 * \param [in] op 0 - invalidate, 1 - clean, otherwise - invalidate and clean
1011 * \deprecated Use generic L1C_CleanInvalidateCache instead.
1012 */
1013 CMSIS_DEPRECATED
1014 __STATIC_FORCEINLINE void __L1C_CleanInvalidateCache(uint32_t op) {
1015 L1C_CleanInvalidateCache(op);
1016 }
1017
1018 /** \brief Invalidate the whole data cache.
1019 */
1020 __STATIC_FORCEINLINE void L1C_InvalidateDCacheAll(void) {
1021 L1C_CleanInvalidateCache(0);
1022 }
1023
1024 /** \brief Clean the whole data cache.
1025 */
1026 __STATIC_FORCEINLINE void L1C_CleanDCacheAll(void) {
1027 L1C_CleanInvalidateCache(1);
1028 }
1029
1030 /** \brief Clean and invalidate the whole data cache.
1031 */
1032 __STATIC_FORCEINLINE void L1C_CleanInvalidateDCacheAll(void) {
1033 L1C_CleanInvalidateCache(2);
1034 }
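/** \brief Usage sketch (illustrative): a common L1 bring-up order is to invalidate
    the instruction and data caches before enabling them and branch prediction.
    \code
    L1C_InvalidateICacheAll();
    L1C_InvalidateDCacheAll();
    L1C_EnableCaches();
    L1C_EnableBTAC();
    \endcode
 */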
1035
1036 /* ########################## L2 Cache functions ################################# */
1037 #if (__L2C_PRESENT == 1U) || defined(DOXYGEN)
1038 /** \brief Cache Sync operation by writing CACHE_SYNC register.
1039 */
1040 __STATIC_INLINE void L2C_Sync(void)
1041 {
1042 L2C_310->CACHE_SYNC = 0x0;
1043 }
1044
1045 /** \brief Read cache controller cache ID from CACHE_ID register.
1046 * \return L2C_310_TypeDef::CACHE_ID
1047 */
1048 __STATIC_INLINE int L2C_GetID (void)
1049 {
1050 return L2C_310->CACHE_ID;
1051 }
1052
1053 /** \brief Read cache controller cache type from CACHE_TYPE register.
1054 * \return L2C_310_TypeDef::CACHE_TYPE
1055 */
1056 __STATIC_INLINE int L2C_GetType (void)
1057 {
1058 return L2C_310->CACHE_TYPE;
1059 }
1060
1061 /** \brief Invalidate all cache by way
1062 */
1063 __STATIC_INLINE void L2C_InvAllByWay (void)
1064 {
1065 unsigned int assoc;
1066
1067 if (L2C_310->AUX_CNT & (1U << 16U)) {
1068 assoc = 16U;
1069 } else {
1070 assoc = 8U;
1071 }
1072
1073 L2C_310->INV_WAY = (1U << assoc) - 1U;
1074 while(L2C_310->INV_WAY & ((1U << assoc) - 1U)); //poll invalidate
1075
1076 L2C_Sync();
1077 }
1078
1079 /** \brief Clean and Invalidate all cache by way
1080 */
1081 __STATIC_INLINE void L2C_CleanInvAllByWay (void)
1082 {
1083 unsigned int assoc;
1084
1085 if (L2C_310->AUX_CNT & (1U << 16U)) {
1086 assoc = 16U;
1087 } else {
1088 assoc = 8U;
1089 }
1090
1091 L2C_310->CLEAN_INV_WAY = (1U << assoc) - 1U;
1092 while(L2C_310->CLEAN_INV_WAY & ((1U << assoc) - 1U)); //poll invalidate
1093
1094 L2C_Sync();
1095 }
1096
1097 /** \brief Enable Level 2 Cache
1098 */
1099 __STATIC_INLINE void L2C_Enable(void)
1100 {
1101 L2C_310->CONTROL = 0;
1102 L2C_310->INTERRUPT_CLEAR = 0x000001FFuL;
1103 L2C_310->DEBUG_CONTROL = 0;
1104 L2C_310->DATA_LOCK_0_WAY = 0;
1105 L2C_310->CACHE_SYNC = 0;
1106 L2C_310->CONTROL = 0x01;
1107 L2C_Sync();
1108 }
1109
1110 /** \brief Disable Level 2 Cache
1111 */
1112 __STATIC_INLINE void L2C_Disable(void)
1113 {
1114 L2C_310->CONTROL = 0x00;
1115 L2C_Sync();
1116 }
1117
1118 /** \brief Invalidate cache by physical address
1119 * \param [in] pa Pointer to data to invalidate cache for.
1120 */
1121 __STATIC_INLINE void L2C_InvPa (void *pa)
1122 {
1123 L2C_310->INV_LINE_PA = (unsigned int)pa;
1124 L2C_Sync();
1125 }
1126
1127 /** \brief Clean cache by physical address
1128 * \param [in] pa Pointer to data to clean the cache for.
1129 */
1130 __STATIC_INLINE void L2C_CleanPa (void *pa)
1131 {
1132 L2C_310->CLEAN_LINE_PA = (unsigned int)pa;
1133 L2C_Sync();
1134 }
1135
1136 /** \brief Clean and invalidate cache by physical address
1137 * \param [in] pa Pointer to data to clean and invalidate the cache for.
1138 */
1139 __STATIC_INLINE void L2C_CleanInvPa (void *pa)
1140 {
1141 L2C_310->CLEAN_INV_LINE_PA = (unsigned int)pa;
1142 L2C_Sync();
1143 }
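/** \brief Usage sketch (illustrative): typical L2C-310 bring-up once the auxiliary
    control register has been configured by the device start-up code.
    \code
    L2C_InvAllByWay();   // start from a known clean state
    L2C_Enable();        // clears pending interrupts, unlocks way 0 and sets the enable bit
    \endcode
 */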
1144 #endif
1145
1146 /* ########################## GIC functions ###################################### */
1147 #if (__GIC_PRESENT == 1U) || defined(DOXYGEN)
1148
1149 /** \brief Enable the interrupt distributor using the GIC's CTLR register.
1150 */
1151 __STATIC_INLINE void GIC_EnableDistributor(void)
1152 {
1153 GICDistributor->CTLR |= 1U;
1154 }
1155
1156 /** \brief Disable the interrupt distributor using the GIC's CTLR register.
1157 */
1158 __STATIC_INLINE void GIC_DisableDistributor(void)
1159 {
1160 GICDistributor->CTLR &=~1U;
1161 }
1162
1163 /** \brief Read the GIC's TYPER register.
1164 * \return GICDistributor_Type::TYPER
1165 */
1166 __STATIC_INLINE uint32_t GIC_DistributorInfo(void)
1167 {
1168 return (GICDistributor->TYPER);
1169 }
1170
1171 /** \brief Reads the GIC's IIDR register.
1172 * \return GICDistributor_Type::IIDR
1173 */
1174 __STATIC_INLINE uint32_t GIC_DistributorImplementer(void)
1175 {
1176 return (GICDistributor->IIDR);
1177 }
1178
1179 /** \brief Sets the GIC's ITARGETSR register for the given interrupt.
1180 * \param [in] IRQn Interrupt to be configured.
1181 * \param [in] cpu_target CPU interfaces to assign this interrupt to.
1182 */
1183 __STATIC_INLINE void GIC_SetTarget(IRQn_Type IRQn, uint32_t cpu_target)
1184 {
1185 uint32_t mask = GICDistributor->ITARGETSR[IRQn / 4U] & ~(0xFFUL << ((IRQn % 4U) * 8U));
1186 GICDistributor->ITARGETSR[IRQn / 4U] = mask | ((cpu_target & 0xFFUL) << ((IRQn % 4U) * 8U));
1187 }
1188
1189 /** \brief Read the GIC's ITARGETSR register.
1190 * \param [in] IRQn Interrupt to acquire the configuration for.
1191 * \return GICDistributor_Type::ITARGETSR
1192 */
1193 __STATIC_INLINE uint32_t GIC_GetTarget(IRQn_Type IRQn)
1194 {
1195 return (GICDistributor->ITARGETSR[IRQn / 4U] >> ((IRQn % 4U) * 8U)) & 0xFFUL;
1196 }
1197
1198 /** \brief Enable the CPU's interrupt interface.
1199 */
1200 __STATIC_INLINE void GIC_EnableInterface(void)
1201 {
1202 GICInterface->CTLR |= 1U; //enable interface
1203 }
1204
1205 /** \brief Disable the CPU's interrupt interface.
1206 */
1207 __STATIC_INLINE void GIC_DisableInterface(void)
1208 {
1209   GICInterface->CTLR &=~1U; //disable interface
1210 }
1211
1212 /** \brief Read the CPU's IAR register.
1213 * \return GICInterface_Type::IAR
1214 */
1215 __STATIC_INLINE IRQn_Type GIC_AcknowledgePending(void)
1216 {
1217 return (IRQn_Type)(GICInterface->IAR);
1218 }
1219
1220 /** \brief Writes the given interrupt number to the CPU's EOIR register.
1221 * \param [in] IRQn The interrupt to be signaled as finished.
1222 */
1223 __STATIC_INLINE void GIC_EndInterrupt(IRQn_Type IRQn)
1224 {
1225 GICInterface->EOIR = IRQn;
1226 }
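/** \brief Usage sketch (illustrative): minimal acknowledge/end-of-interrupt
    handshake inside the IRQ exception handler.
    \code
    IRQn_Type irq = GIC_AcknowledgePending();   // read IAR; the interrupt becomes active
    // ... dispatch to the handler registered for 'irq' ...
    GIC_EndInterrupt(irq);                      // write EOIR to deactivate it
    \endcode
 */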
1227
1228 /** \brief Enables the given interrupt using GIC's ISENABLER register.
1229 * \param [in] IRQn The interrupt to be enabled.
1230 */
1231 __STATIC_INLINE void GIC_EnableIRQ(IRQn_Type IRQn)
1232 {
1233 GICDistributor->ISENABLER[IRQn / 32U] = 1U << (IRQn % 32U);
1234 }
1235
1236 /** \brief Get interrupt enable status using GIC's ISENABLER register.
1237 * \param [in] IRQn The interrupt to be queried.
1238 * \return 0 - interrupt is not enabled, 1 - interrupt is enabled.
1239 */
1240 __STATIC_INLINE uint32_t GIC_GetEnableIRQ(IRQn_Type IRQn)
1241 {
1242 return (GICDistributor->ISENABLER[IRQn / 32U] >> (IRQn % 32U)) & 1UL;
1243 }
1244
1245 /** \brief Disables the given interrupt using GIC's ICENABLER register.
1246 * \param [in] IRQn The interrupt to be disabled.
1247 */
1248 __STATIC_INLINE void GIC_DisableIRQ(IRQn_Type IRQn)
1249 {
1250 GICDistributor->ICENABLER[IRQn / 32U] = 1U << (IRQn % 32U);
1251 }
1252
1253 /** \brief Get interrupt pending status from GIC's ISPENDR register.
1254 * \param [in] IRQn The interrupt to be queried.
1255 * \return 0 - interrupt is not pending, 1 - interrupt is pending.
1256 */
1257 __STATIC_INLINE uint32_t GIC_GetPendingIRQ(IRQn_Type IRQn)
1258 {
1259 uint32_t pend;
1260
1261 if (IRQn >= 16U) {
1262 pend = (GICDistributor->ISPENDR[IRQn / 32U] >> (IRQn % 32U)) & 1UL;
1263 } else {
1264 // INTID 0-15 Software Generated Interrupt
1265 pend = (GICDistributor->SPENDSGIR[IRQn / 4U] >> ((IRQn % 4U) * 8U)) & 0xFFUL;
1266 // No CPU identification offered
1267 if (pend != 0U) {
1268 pend = 1U;
1269 } else {
1270 pend = 0U;
1271 }
1272 }
1273
1274 return (pend);
1275 }
1276
1277 /** \brief Sets the given interrupt as pending using GIC's ISPENDR register.
1278 * \param [in] IRQn The interrupt to be enabled.
1279 */
1280 __STATIC_INLINE void GIC_SetPendingIRQ(IRQn_Type IRQn)
1281 {
1282 if (IRQn >= 16U) {
1283 GICDistributor->ISPENDR[IRQn / 32U] = 1U << (IRQn % 32U);
1284 } else {
1285 // INTID 0-15 Software Generated Interrupt
1286 GICDistributor->SPENDSGIR[IRQn / 4U] = 1U << ((IRQn % 4U) * 8U);
1287 }
1288 }
1289
1290 /** \brief Clears the given interrupt from being pending using GIC's ICPENDR register.
1291 * \param [in] IRQn The interrupt to be enabled.
1292 */
1293 __STATIC_INLINE void GIC_ClearPendingIRQ(IRQn_Type IRQn)
1294 {
1295 if (IRQn >= 16U) {
1296 GICDistributor->ICPENDR[IRQn / 32U] = 1U << (IRQn % 32U);
1297 } else {
1298 // INTID 0-15 Software Generated Interrupt
1299 GICDistributor->CPENDSGIR[IRQn / 4U] = 1U << ((IRQn % 4U) * 8U);
1300 }
1301 }
1302
1303 /** \brief Sets the interrupt configuration using GIC's ICFGR register.
1304 * \param [in] IRQn The interrupt to be configured.
1305 * \param [in] int_config Int_config field value. Bit 0: Reserved (0 - N-N model, 1 - 1-N model for some GIC before v1)
1306 * Bit 1: 0 - level sensitive, 1 - edge triggered
1307 */
1308 __STATIC_INLINE void GIC_SetConfiguration(IRQn_Type IRQn, uint32_t int_config)
1309 {
1310 uint32_t icfgr = GICDistributor->ICFGR[IRQn / 16U];
1311 uint32_t shift = (IRQn % 16U) << 1U;
1312
1313 icfgr &= (~(3U << shift));
1314 icfgr |= ( int_config << shift);
1315
1316 GICDistributor->ICFGR[IRQn / 16U] = icfgr;
1317 }
1318
1319 /** \brief Get the interrupt configuration from the GIC's ICFGR register.
1320 * \param [in] IRQn Interrupt to acquire the configuration for.
1321 * \return Int_config field value. Bit 0: Reserved (0 - N-N model, 1 - 1-N model for some GIC before v1)
1322 * Bit 1: 0 - level sensitive, 1 - edge triggered
1323 */
1324 __STATIC_INLINE uint32_t GIC_GetConfiguration(IRQn_Type IRQn)
1325 {
1326   return ((GICDistributor->ICFGR[IRQn / 16U] >> ((IRQn % 16U) << 1U)) & 3UL);
1327 }
1328
1329 /** \brief Set the priority for the given interrupt in the GIC's IPRIORITYR register.
1330 * \param [in] IRQn The interrupt to be configured.
1331 * \param [in] priority The priority for the interrupt, lower values denote higher priorities.
1332 */
1333 __STATIC_INLINE void GIC_SetPriority(IRQn_Type IRQn, uint32_t priority)
1334 {
1335 uint32_t mask = GICDistributor->IPRIORITYR[IRQn / 4U] & ~(0xFFUL << ((IRQn % 4U) * 8U));
1336 GICDistributor->IPRIORITYR[IRQn / 4U] = mask | ((priority & 0xFFUL) << ((IRQn % 4U) * 8U));
1337 }
1338
1339 /** \brief Read the current interrupt priority from GIC's IPRIORITYR register.
1340 * \param [in] IRQn The interrupt to be queried.
1341 */
1342 __STATIC_INLINE uint32_t GIC_GetPriority(IRQn_Type IRQn)
1343 {
1344 return (GICDistributor->IPRIORITYR[IRQn / 4U] >> ((IRQn % 4U) * 8U)) & 0xFFUL;
1345 }
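
/* Usage sketch (illustrative only, not part of the CMSIS API): configuring a single
   shared peripheral interrupt with the accessors above. GIC_SetTarget() and
   GIC_EnableIRQ() are assumed to be declared earlier in this file; interrupt ID 37 is
   just a placeholder for a device-specific IRQ number.

   void example_setup_irq(void)
   {
     IRQn_Type irq = (IRQn_Type)37U;        // hypothetical peripheral interrupt ID
     GIC_SetConfiguration(irq, 2U);         // edge triggered (Int_config bit 1 set)
     GIC_SetPriority(irq, 0xA0U);           // lower value = higher priority
     GIC_SetTarget(irq, 1U);                // route to CPU0
     GIC_EnableIRQ(irq);                    // set-enable via ISENABLER
   }
*/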
1346
1347 /** \brief Set the interrupt priority mask using CPU's PMR register.
1348 * \param [in] priority Priority mask to be set.
1349 */
1350 __STATIC_INLINE void GIC_SetInterfacePriorityMask(uint32_t priority)
1351 {
1352 GICInterface->PMR = priority & 0xFFUL; //set priority mask
1353 }
1354
1355 /** \brief Read the current interrupt priority mask from CPU's PMR register.
1356 * \result GICInterface_Type::PMR
1357 */
1358 __STATIC_INLINE uint32_t GIC_GetInterfacePriorityMask(void)
1359 {
1360 return GICInterface->PMR;
1361 }
1362
1363 /** \brief Configures the group priority and subpriority split point using CPU's BPR register.
1364 * \param [in] binary_point Number of bits used for the subpriority.
1365 */
1366 __STATIC_INLINE void GIC_SetBinaryPoint(uint32_t binary_point)
1367 {
1368 GICInterface->BPR = binary_point & 7U; //set binary point
1369 }
1370
1371 /** \brief Read the current group priority and subpriority split point from CPU's BPR register.
1372 * \return GICInterface_Type::BPR
1373 */
1374 __STATIC_INLINE uint32_t GIC_GetBinaryPoint(void)
1375 {
1376 return GICInterface->BPR;
1377 }
1378
1379 /** \brief Get the status for a given interrupt.
1380 * \param [in] IRQn The interrupt to get status for.
1381 * \return 0 - not pending/active, 1 - pending, 2 - active, 3 - pending and active
1382 */
1383 __STATIC_INLINE uint32_t GIC_GetIRQStatus(IRQn_Type IRQn)
1384 {
1385 uint32_t pending, active;
1386
1387 active = ((GICDistributor->ISACTIVER[IRQn / 32U]) >> (IRQn % 32U)) & 1UL;
1388 pending = ((GICDistributor->ISPENDR[IRQn / 32U]) >> (IRQn % 32U)) & 1UL;
1389
1390 return ((active<<1U) | pending);
1391 }
1392
1393 /** \brief Generate a software interrupt using GIC's SGIR register.
1394 * \param [in] IRQn Software interrupt to be generated.
1395 * \param [in] target_list List of CPUs the software interrupt should be forwarded to.
1396 * \param [in] filter_list Filter to be applied to determine interrupt receivers.
1397 */
1398 __STATIC_INLINE void GIC_SendSGI(IRQn_Type IRQn, uint32_t target_list, uint32_t filter_list)
1399 {
1400 GICDistributor->SGIR = ((filter_list & 3U) << 24U) | ((target_list & 0xFFUL) << 16U) | (IRQn & 0x0FUL);
1401 }
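
/* Usage sketch (illustrative only): raising a software-generated interrupt on another
   core. With filter_list = 0 the SGI is forwarded to the CPU interfaces named in
   target_list (one bit per CPU); filter_list = 1 targets all CPUs except the requesting
   one, and filter_list = 2 targets only the requesting CPU.

   GIC_SendSGI((IRQn_Type)0U, 1U << 1, 0U);   // send SGI 0 to CPU1 only
*/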
1402
1403 /** \brief Get the interrupt number of the highest interrupt pending from CPU's HPPIR register.
1404 * \return GICInterface_Type::HPPIR
1405 */
1406 __STATIC_INLINE uint32_t GIC_GetHighPendingIRQ(void)
1407 {
1408 return GICInterface->HPPIR;
1409 }
1410
1411 /** \brief Provides information about the implementer and revision of the CPU interface.
1412 * \return GICInterface_Type::IIDR
1413 */
1414 __STATIC_INLINE uint32_t GIC_GetInterfaceId(void)
1415 {
1416 return GICInterface->IIDR;
1417 }
1418
1419 /** \brief Set the interrupt group in the GIC's IGROUPR register.
1420 * \param [in] IRQn The interrupt to be configured.
1421 * \param [in] group Interrupt group number: 0 - Group 0, 1 - Group 1
1422 */
1423 __STATIC_INLINE void GIC_SetGroup(IRQn_Type IRQn, uint32_t group)
1424 {
1425 uint32_t igroupr = GICDistributor->IGROUPR[IRQn / 32U];
1426 uint32_t shift = (IRQn % 32U);
1427
1428 igroupr &= (~(1U << shift));
1429 igroupr |= ( (group & 1U) << shift);
1430
1431 GICDistributor->IGROUPR[IRQn / 32U] = igroupr;
1432 }
1433 #define GIC_SetSecurity GIC_SetGroup
1434
1435 /** \brief Get the interrupt group from the GIC's IGROUPR register.
1436 * \param [in] IRQn The interrupt to be queried.
1437 * \return 0 - Group 0, 1 - Group 1
1438 */
1439 __STATIC_INLINE uint32_t GIC_GetGroup(IRQn_Type IRQn)
1440 {
1441 return (GICDistributor->IGROUPR[IRQn / 32U] >> (IRQn % 32U)) & 1UL;
1442 }
1443 #define GIC_GetSecurity GIC_GetGroup
1444
1445 /** \brief Initialize the interrupt distributor.
1446 */
1447 __STATIC_INLINE void GIC_DistInit(void)
1448 {
1449 uint32_t i;
1450 uint32_t num_irq = 0U;
1451 uint32_t priority_field;
1452
1453 //A reset sets all bits in the IGROUPRs corresponding to the SPIs to 0,
1454 //configuring all of the interrupts as Secure.
1455
1456 //Disable interrupt forwarding
1457 GIC_DisableDistributor();
1458 //Get the maximum number of interrupts that the GIC supports
1459 num_irq = 32U * ((GIC_DistributorInfo() & 0x1FU) + 1U);
1460
1461 /* Priority level is implementation defined.
1462 To determine the number of priority bits implemented write 0xFF to an IPRIORITYR
1463 priority field and read back the value stored.*/
1464 GIC_SetPriority((IRQn_Type)0U, 0xFFU);
1465 priority_field = GIC_GetPriority((IRQn_Type)0U);
1466
1467 for (i = 32U; i < num_irq; i++)
1468 {
1469 //Disable the SPI interrupt
1470 GIC_DisableIRQ((IRQn_Type)i);
1471 //Set level-sensitive (and N-N model)
1472 GIC_SetConfiguration((IRQn_Type)i, 0U);
1473 //Set priority
1474 GIC_SetPriority((IRQn_Type)i, priority_field/2U);
1475 //Set target list to CPU0
1476 GIC_SetTarget((IRQn_Type)i, 1U);
1477 }
1478 //Enable distributor
1479 GIC_EnableDistributor();
1480 }
1481
1482 /** \brief Initialize the CPU's interrupt interface
1483 */
1484 __STATIC_INLINE void GIC_CPUInterfaceInit(void)
1485 {
1486 uint32_t i;
1487 uint32_t priority_field;
1488
1489 //A reset sets all bits in the IGROUPRs corresponding to the SPIs to 0,
1490 //configuring all of the interrupts as Secure.
1491
1492 //Disable interrupt forwarding
1493 GIC_DisableInterface();
1494
1495 /* Priority level is implementation defined.
1496 To determine the number of priority bits implemented write 0xFF to an IPRIORITYR
1497 priority field and read back the value stored.*/
1498 GIC_SetPriority((IRQn_Type)0U, 0xFFU);
1499 priority_field = GIC_GetPriority((IRQn_Type)0U);
1500
1501 //SGI and PPI
1502 for (i = 0U; i < 32U; i++)
1503 {
1504 if(i > 15U) {
1505 //Set level-sensitive (and N-N model) for PPI
1506 GIC_SetConfiguration((IRQn_Type)i, 0U);
1507 }
1508 //Disable SGI and PPI interrupts
1509 GIC_DisableIRQ((IRQn_Type)i);
1510 //Set priority
1511 GIC_SetPriority((IRQn_Type)i, priority_field/2U);
1512 }
1513 //Enable interface
1514 GIC_EnableInterface();
1515 //Set binary point to 0
1516 GIC_SetBinaryPoint(0U);
1517 //Set priority mask
1518 GIC_SetInterfacePriorityMask(0xFFU);
1519 }
1520
1521 /** \brief Initialize and enable the GIC
1522 */
1523 __STATIC_INLINE void GIC_Enable(void)
1524 {
1525 GIC_DistInit();
1526 GIC_CPUInterfaceInit(); //per CPU
1527 }
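
/* Usage sketch (illustrative only): typical bring-up order on a multi-core system.
   The distributor is initialised once, the CPU interface once per core; GIC_Enable()
   combines both for the simple single-call case.

   GIC_Enable();                              // boot core: distributor + own CPU interface
   // Secondary cores only need to initialise their own CPU interface:
   // GIC_CPUInterfaceInit();
*/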
1528 #endif
1529
1530 /* ########################## Generic Timer functions ############################ */
1531 #if (__TIM_PRESENT == 1U) || defined(DOXYGEN)
1532
1533 /* PL1 Physical Timer */
1534 #if (__CORTEX_A == 7U) || defined(DOXYGEN)
1535
1536 /** \brief Physical Timer Control register */
1537 typedef union
1538 {
1539 struct
1540 {
1541 uint32_t ENABLE:1; /*!< \brief bit: 0 Enables the timer. */
1542 uint32_t IMASK:1; /*!< \brief bit: 1 Timer output signal mask bit. */
1543 uint32_t ISTATUS:1; /*!< \brief bit: 2 The status of the timer. */
1544 RESERVED(0:29, uint32_t)
1545 } b; /*!< \brief Structure used for bit access */
1546 uint32_t w; /*!< \brief Type used for word access */
1547 } CNTP_CTL_Type;
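
/* Usage sketch (illustrative only): building a CNTP_CTL value with the bit-field view
   above and handing it to PL1_SetControl() (defined below).

   CNTP_CTL_Type ctl = { 0 };
   ctl.b.ENABLE = 1U;                         // start the timer
   ctl.b.IMASK  = 0U;                         // do not mask the timer output signal
   PL1_SetControl(ctl.w);
*/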
1548
1549 /** \brief Configures the frequency the timer shall run at.
1550 * \param [in] value The timer frequency in Hz.
1551 */
1552 __STATIC_INLINE void PL1_SetCounterFrequency(uint32_t value)
1553 {
1554 __set_CNTFRQ(value);
1555 __ISB();
1556 }
1557
1558 /** \brief Sets the reset value of the timer.
1559 * \param [in] value The value the timer is loaded with.
1560 */
1561 __STATIC_INLINE void PL1_SetLoadValue(uint32_t value)
1562 {
1563 __set_CNTP_TVAL(value);
1564 __ISB();
1565 }
1566
1567 /** \brief Get the current counter value.
1568 * \return Current counter value.
1569 */
1570 __STATIC_INLINE uint32_t PL1_GetCurrentValue(void)
1571 {
1572 return(__get_CNTP_TVAL());
1573 }
1574
1575 /** \brief Get the current physical counter value.
1576 * \return Current physical counter value.
1577 */
1578 __STATIC_INLINE uint64_t PL1_GetCurrentPhysicalValue(void)
1579 {
1580 return(__get_CNTPCT());
1581 }
1582
1583 /** \brief Set the physical compare value.
1584 * \param [in] value New physical timer compare value.
1585 */
1586 __STATIC_INLINE void PL1_SetPhysicalCompareValue(uint64_t value)
1587 {
1588 __set_CNTP_CVAL(value);
1589 __ISB();
1590 }
1591
1592 /** \brief Get the physical compare value.
1593 * \return Physical compare value.
1594 */
1595 __STATIC_INLINE uint64_t PL1_GetPhysicalCompareValue(void)
1596 {
1597 return(__get_CNTP_CVAL());
1598 }
1599
1600 /** \brief Configure the timer by setting the control value.
1601 * \param [in] value New timer control value.
1602 */
1603 __STATIC_INLINE void PL1_SetControl(uint32_t value)
1604 {
1605 __set_CNTP_CTL(value);
1606 __ISB();
1607 }
1608
1609 /** \brief Get the control value.
1610 * \return Control value.
1611 */
1612 __STATIC_INLINE uint32_t PL1_GetControl(void)
1613 {
1614 return(__get_CNTP_CTL());
1615 }
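
/* Usage sketch (illustrative only): programming a periodic PL1 physical timer tick.
   CNTFRQ is normally written once by secure firmware; the frequency below is only a
   placeholder value.

   void example_timer_start(void)
   {
     PL1_SetCounterFrequency(24000000U);      // placeholder: 24 MHz system counter
     PL1_SetLoadValue(24000000U / 100U);      // ~10 ms expressed in counter ticks (CNTP_TVAL)
     PL1_SetControl(1U);                      // ENABLE = 1, IMASK = 0
   }
   // In the timer interrupt handler, reloading CNTP_TVAL with PL1_SetLoadValue()
   // restarts the countdown for the next period.
*/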
1616 #endif
1617
1618 /* Private Timer */
1619 #if ((__CORTEX_A == 5U) || (__CORTEX_A == 9U)) || defined(DOXYGEN)
1620 /** \brief Set the load value in the timer's LOAD register.
1621 * \param [in] value The load value to be set.
1622 */
1623 __STATIC_INLINE void PTIM_SetLoadValue(uint32_t value)
1624 {
1625 PTIM->LOAD = value;
1626 }
1627
1628 /** \brief Get the load value from the timer's LOAD register.
1629 * \return Timer_Type::LOAD
1630 */
1631 __STATIC_INLINE uint32_t PTIM_GetLoadValue(void)
1632 {
1633 return(PTIM->LOAD);
1634 }
1635
1636 /** \brief Set the current counter value via the timer's COUNTER register.
1637 */
1638 __STATIC_INLINE void PTIM_SetCurrentValue(uint32_t value)
1639 {
1640 PTIM->COUNTER = value;
1641 }
1642
1643 /** \brief Get the current counter value from the timer's COUNTER register.
1644 * \result Timer_Type::COUNTER
1645 */
1646 __STATIC_INLINE uint32_t PTIM_GetCurrentValue(void)
1647 {
1648 return(PTIM->COUNTER);
1649 }
1650
1651 /** \brief Configure the timer using its CONTROL register.
1652 * \param [in] value The new configuration value to be set.
1653 */
1654 __STATIC_INLINE void PTIM_SetControl(uint32_t value)
1655 {
1656 PTIM->CONTROL = value;
1657 }
1658
1659 /** \brief Get the current timer configuration from its CONTROL register.
1660 * \return Timer_Type::CONTROL
1661 */
1662 __STATIC_INLINE uint32_t PTIM_GetControl(void)
1663 {
1664 return(PTIM->CONTROL);
1665 }
1666
1667 /** \brief Get the event flag from the timer's ISR register.
1668 * \return 0 - flag is not set, 1- flag is set
1669 */
1670 __STATIC_INLINE uint32_t PTIM_GetEventFlag(void)
1671 {
1672 return (PTIM->ISR & 1UL);
1673 }
1674
1675 /** \brief Clear the event flag in the timer's ISR register.
1676 */
1677 __STATIC_INLINE void PTIM_ClearEventFlag(void)
1678 {
1679 PTIM->ISR = 1;
1680 }
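
/* Usage sketch (illustrative only): driving the Cortex-A5/A9 private timer in
   auto-reload mode. The CONTROL encoding used here (bit 0 enable, bit 1 auto-reload,
   bit 2 IRQ enable) follows the MPCore private timer programmer's model; writing LOAD
   also seeds COUNTER on this timer.

   void example_ptim_start(uint32_t reload_ticks)
   {
     PTIM_SetLoadValue(reload_ticks);         // LOAD value, also copied to COUNTER
     PTIM_SetControl(0x7U);                   // enable + auto-reload + IRQ enable
   }

   void example_ptim_irq_handler(void)
   {
     if (PTIM_GetEventFlag() != 0U) {
       PTIM_ClearEventFlag();                 // write 1 to clear the ISR event bit
     }
   }
*/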
1681 #endif
1682 #endif
1683
1684 /* ########################## MMU functions ###################################### */
1685
1686 #define SECTION_DESCRIPTOR (0x2)
1687 #define SECTION_MASK (0xFFFFFFFC)
1688
1689 #define SECTION_TEXCB_MASK (0xFFFF8FF3)
1690 #define SECTION_B_SHIFT (2)
1691 #define SECTION_C_SHIFT (3)
1692 #define SECTION_TEX0_SHIFT (12)
1693 #define SECTION_TEX1_SHIFT (13)
1694 #define SECTION_TEX2_SHIFT (14)
1695
1696 #define SECTION_XN_MASK (0xFFFFFFEF)
1697 #define SECTION_XN_SHIFT (4)
1698
1699 #define SECTION_DOMAIN_MASK (0xFFFFFE1F)
1700 #define SECTION_DOMAIN_SHIFT (5)
1701
1702 #define SECTION_P_MASK (0xFFFFFDFF)
1703 #define SECTION_P_SHIFT (9)
1704
1705 #define SECTION_AP_MASK (0xFFFF73FF)
1706 #define SECTION_AP_SHIFT (10)
1707 #define SECTION_AP2_SHIFT (15)
1708
1709 #define SECTION_S_MASK (0xFFFEFFFF)
1710 #define SECTION_S_SHIFT (16)
1711
1712 #define SECTION_NG_MASK (0xFFFDFFFF)
1713 #define SECTION_NG_SHIFT (17)
1714
1715 #define SECTION_NS_MASK (0xFFF7FFFF)
1716 #define SECTION_NS_SHIFT (19)
1717
1718 #define PAGE_L1_DESCRIPTOR (0x1)
1719 #define PAGE_L1_MASK (0xFFFFFFFC)
1720
1721 #define PAGE_L2_4K_DESC (0x2)
1722 #define PAGE_L2_4K_MASK (0xFFFFFFFD)
1723
1724 #define PAGE_L2_64K_DESC (0x1)
1725 #define PAGE_L2_64K_MASK (0xFFFFFFFC)
1726
1727 #define PAGE_4K_TEXCB_MASK (0xFFFFFE33)
1728 #define PAGE_4K_B_SHIFT (2)
1729 #define PAGE_4K_C_SHIFT (3)
1730 #define PAGE_4K_TEX0_SHIFT (6)
1731 #define PAGE_4K_TEX1_SHIFT (7)
1732 #define PAGE_4K_TEX2_SHIFT (8)
1733
1734 #define PAGE_64K_TEXCB_MASK (0xFFFF8FF3)
1735 #define PAGE_64K_B_SHIFT (2)
1736 #define PAGE_64K_C_SHIFT (3)
1737 #define PAGE_64K_TEX0_SHIFT (12)
1738 #define PAGE_64K_TEX1_SHIFT (13)
1739 #define PAGE_64K_TEX2_SHIFT (14)
1740
1741 #define PAGE_TEXCB_MASK (0xFFFF8FF3)
1742 #define PAGE_B_SHIFT (2)
1743 #define PAGE_C_SHIFT (3)
1744 #define PAGE_TEX_SHIFT (12)
1745
1746 #define PAGE_XN_4K_MASK (0xFFFFFFFE)
1747 #define PAGE_XN_4K_SHIFT (0)
1748 #define PAGE_XN_64K_MASK (0xFFFF7FFF)
1749 #define PAGE_XN_64K_SHIFT (15)
1750
1751 #define PAGE_DOMAIN_MASK (0xFFFFFE1F)
1752 #define PAGE_DOMAIN_SHIFT (5)
1753
1754 #define PAGE_P_MASK (0xFFFFFDFF)
1755 #define PAGE_P_SHIFT (9)
1756
1757 #define PAGE_AP_MASK (0xFFFFFDCF)
1758 #define PAGE_AP_SHIFT (4)
1759 #define PAGE_AP2_SHIFT (9)
1760
1761 #define PAGE_S_MASK (0xFFFFFBFF)
1762 #define PAGE_S_SHIFT (10)
1763
1764 #define PAGE_NG_MASK (0xFFFFF7FF)
1765 #define PAGE_NG_SHIFT (11)
1766
1767 #define PAGE_NS_MASK (0xFFFFFFF7)
1768 #define PAGE_NS_SHIFT (3)
1769
1770 #define OFFSET_1M (0x00100000)
1771 #define OFFSET_64K (0x00010000)
1772 #define OFFSET_4K (0x00001000)
1773
1774 #define DESCRIPTOR_FAULT (0x00000000)
1775
1776 /* Attributes enumerations */
1777
1778 /* Region size attributes */
1779 typedef enum
1780 {
1781 SECTION,
1782 PAGE_4k,
1783 PAGE_64k,
1784 } mmu_region_size_Type;
1785
1786 /* Region type attributes */
1787 typedef enum
1788 {
1789 NORMAL,
1790 DEVICE,
1791 SHARED_DEVICE,
1792 NON_SHARED_DEVICE,
1793 STRONGLY_ORDERED
1794 } mmu_memory_Type;
1795
1796 /* Region cacheability attributes */
1797 typedef enum
1798 {
1799 NON_CACHEABLE,
1800 WB_WA,
1801 WT,
1802 WB_NO_WA,
1803 } mmu_cacheability_Type;
1804
1805 /* Region parity check attributes */
1806 typedef enum
1807 {
1808 ECC_DISABLED,
1809 ECC_ENABLED,
1810 } mmu_ecc_check_Type;
1811
1812 /* Region execution attributes */
1813 typedef enum
1814 {
1815 EXECUTE,
1816 NON_EXECUTE,
1817 } mmu_execute_Type;
1818
1819 /* Region global attributes */
1820 typedef enum
1821 {
1822 GLOBAL,
1823 NON_GLOBAL,
1824 } mmu_global_Type;
1825
1826 /* Region shareability attributes */
1827 typedef enum
1828 {
1829 NON_SHARED,
1830 SHARED,
1831 } mmu_shared_Type;
1832
1833 /* Region security attributes */
1834 typedef enum
1835 {
1836 SECURE,
1837 NON_SECURE,
1838 } mmu_secure_Type;
1839
1840 /* Region access attributes */
1841 typedef enum
1842 {
1843 NO_ACCESS,
1844 RW,
1845 READ,
1846 } mmu_access_Type;
1847
1848 /* Memory Region definition */
1849 typedef struct RegionStruct {
1850 mmu_region_size_Type rg_t;
1851 mmu_memory_Type mem_t;
1852 uint8_t domain;
1853 mmu_cacheability_Type inner_norm_t;
1854 mmu_cacheability_Type outer_norm_t;
1855 mmu_ecc_check_Type e_t;
1856 mmu_execute_Type xn_t;
1857 mmu_global_Type g_t;
1858 mmu_secure_Type sec_t;
1859 mmu_access_Type priv_t;
1860 mmu_access_Type user_t;
1861 mmu_shared_Type sh_t;
1862
1863 } mmu_region_attributes_Type;
1864
1865 //Following macros define the descriptors and attributes
1866 //Sect_Normal. Outer & inner wb/wa, non-shareable, executable, rw, domain 0
1867 #define section_normal(descriptor_l1, region) region.rg_t = SECTION; \
1868 region.domain = 0x0; \
1869 region.e_t = ECC_DISABLED; \
1870 region.g_t = GLOBAL; \
1871 region.inner_norm_t = WB_WA; \
1872 region.outer_norm_t = WB_WA; \
1873 region.mem_t = NORMAL; \
1874 region.sec_t = SECURE; \
1875 region.xn_t = EXECUTE; \
1876 region.priv_t = RW; \
1877 region.user_t = RW; \
1878 region.sh_t = NON_SHARED; \
1879 MMU_GetSectionDescriptor(&descriptor_l1, region);
1880
1881 //Sect_Normal_NC. Outer & inner non-cacheable, non-shareable, executable, rw, domain 0
1882 #define section_normal_nc(descriptor_l1, region) region.rg_t = SECTION; \
1883 region.domain = 0x0; \
1884 region.e_t = ECC_DISABLED; \
1885 region.g_t = GLOBAL; \
1886 region.inner_norm_t = NON_CACHEABLE; \
1887 region.outer_norm_t = NON_CACHEABLE; \
1888 region.mem_t = NORMAL; \
1889 region.sec_t = SECURE; \
1890 region.xn_t = EXECUTE; \
1891 region.priv_t = RW; \
1892 region.user_t = RW; \
1893 region.sh_t = NON_SHARED; \
1894 MMU_GetSectionDescriptor(&descriptor_l1, region);
1895
1896 //Sect_Normal_Cod. Outer & inner wb/wa, non-shareable, executable, ro, domain 0
1897 #define section_normal_cod(descriptor_l1, region) region.rg_t = SECTION; \
1898 region.domain = 0x0; \
1899 region.e_t = ECC_DISABLED; \
1900 region.g_t = GLOBAL; \
1901 region.inner_norm_t = WB_WA; \
1902 region.outer_norm_t = WB_WA; \
1903 region.mem_t = NORMAL; \
1904 region.sec_t = SECURE; \
1905 region.xn_t = EXECUTE; \
1906 region.priv_t = READ; \
1907 region.user_t = READ; \
1908 region.sh_t = NON_SHARED; \
1909 MMU_GetSectionDescriptor(&descriptor_l1, region);
1910
1911 //Sect_Normal_RO. Sect_Normal_Cod, but not executable
1912 #define section_normal_ro(descriptor_l1, region) region.rg_t = SECTION; \
1913 region.domain = 0x0; \
1914 region.e_t = ECC_DISABLED; \
1915 region.g_t = GLOBAL; \
1916 region.inner_norm_t = WB_WA; \
1917 region.outer_norm_t = WB_WA; \
1918 region.mem_t = NORMAL; \
1919 region.sec_t = SECURE; \
1920 region.xn_t = NON_EXECUTE; \
1921 region.priv_t = READ; \
1922 region.user_t = READ; \
1923 region.sh_t = NON_SHARED; \
1924 MMU_GetSectionDescriptor(&descriptor_l1, region);
1925
1926 //Sect_Normal_RW. Sect_Normal_Cod, but writeable and not executable
1927 #define section_normal_rw(descriptor_l1, region) region.rg_t = SECTION; \
1928 region.domain = 0x0; \
1929 region.e_t = ECC_DISABLED; \
1930 region.g_t = GLOBAL; \
1931 region.inner_norm_t = WB_WA; \
1932 region.outer_norm_t = WB_WA; \
1933 region.mem_t = NORMAL; \
1934 region.sec_t = SECURE; \
1935 region.xn_t = NON_EXECUTE; \
1936 region.priv_t = RW; \
1937 region.user_t = RW; \
1938 region.sh_t = NON_SHARED; \
1939 MMU_GetSectionDescriptor(&descriptor_l1, region);
1940 //Sect_SO. Strongly-ordered (therefore shareable), not executable, rw, domain 0, base addr 0
1941 #define section_so(descriptor_l1, region) region.rg_t = SECTION; \
1942 region.domain = 0x0; \
1943 region.e_t = ECC_DISABLED; \
1944 region.g_t = GLOBAL; \
1945 region.inner_norm_t = NON_CACHEABLE; \
1946 region.outer_norm_t = NON_CACHEABLE; \
1947 region.mem_t = STRONGLY_ORDERED; \
1948 region.sec_t = SECURE; \
1949 region.xn_t = NON_EXECUTE; \
1950 region.priv_t = RW; \
1951 region.user_t = RW; \
1952 region.sh_t = NON_SHARED; \
1953 MMU_GetSectionDescriptor(&descriptor_l1, region);
1954
1955 //Sect_Device_RO. Device, non-shareable, non-executable, ro, domain 0, base addr 0
1956 #define section_device_ro(descriptor_l1, region) region.rg_t = SECTION; \
1957 region.domain = 0x0; \
1958 region.e_t = ECC_DISABLED; \
1959 region.g_t = GLOBAL; \
1960 region.inner_norm_t = NON_CACHEABLE; \
1961 region.outer_norm_t = NON_CACHEABLE; \
1962 region.mem_t = STRONGLY_ORDERED; \
1963 region.sec_t = SECURE; \
1964 region.xn_t = NON_EXECUTE; \
1965 region.priv_t = READ; \
1966 region.user_t = READ; \
1967 region.sh_t = NON_SHARED; \
1968 MMU_GetSectionDescriptor(&descriptor_l1, region);
1969
1970 //Sect_Device_RW. Sect_Device_RO, but writeable
1971 #define section_device_rw(descriptor_l1, region) region.rg_t = SECTION; \
1972 region.domain = 0x0; \
1973 region.e_t = ECC_DISABLED; \
1974 region.g_t = GLOBAL; \
1975 region.inner_norm_t = NON_CACHEABLE; \
1976 region.outer_norm_t = NON_CACHEABLE; \
1977 region.mem_t = STRONGLY_ORDERED; \
1978 region.sec_t = SECURE; \
1979 region.xn_t = NON_EXECUTE; \
1980 region.priv_t = RW; \
1981 region.user_t = RW; \
1982 region.sh_t = NON_SHARED; \
1983 MMU_GetSectionDescriptor(&descriptor_l1, region);
1984 //Page_4k_Device_RW. Shared device, not executable, rw, domain 0
1985 #define page4k_device_rw(descriptor_l1, descriptor_l2, region) region.rg_t = PAGE_4k; \
1986 region.domain = 0x0; \
1987 region.e_t = ECC_DISABLED; \
1988 region.g_t = GLOBAL; \
1989 region.inner_norm_t = NON_CACHEABLE; \
1990 region.outer_norm_t = NON_CACHEABLE; \
1991 region.mem_t = SHARED_DEVICE; \
1992 region.sec_t = SECURE; \
1993 region.xn_t = NON_EXECUTE; \
1994 region.priv_t = RW; \
1995 region.user_t = RW; \
1996 region.sh_t = NON_SHARED; \
1997 MMU_GetPageDescriptor(&descriptor_l1, &descriptor_l2, region);
1998
1999 //Page_64k_Device_RW. Shared device, not executable, rw, domain 0
2000 #define page64k_device_rw(descriptor_l1, descriptor_l2, region) region.rg_t = PAGE_64k; \
2001 region.domain = 0x0; \
2002 region.e_t = ECC_DISABLED; \
2003 region.g_t = GLOBAL; \
2004 region.inner_norm_t = NON_CACHEABLE; \
2005 region.outer_norm_t = NON_CACHEABLE; \
2006 region.mem_t = SHARED_DEVICE; \
2007 region.sec_t = SECURE; \
2008 region.xn_t = NON_EXECUTE; \
2009 region.priv_t = RW; \
2010 region.user_t = RW; \
2011 region.sh_t = NON_SHARED; \
2012 MMU_GetPageDescriptor(&descriptor_l1, &descriptor_l2, region);
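
/* Usage sketch (illustrative only): the helper macros above fill a scratch
   mmu_region_attributes_Type and leave a ready-made descriptor in the first argument.
   A typical MMU setup file declares one descriptor per region type:

   static uint32_t Sect_Normal;               // outer/inner WB-WA, RW, executable
   static uint32_t Sect_Device_RW;            // device-type memory, RW, never executable

   void example_build_descriptors(void)
   {
     mmu_region_attributes_Type region;
     section_normal(Sect_Normal, region);
     section_device_rw(Sect_Device_RW, region);
   }
*/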
2013
2014 /** \brief Set section execution-never attribute
2015
2016 \param [out] descriptor_l1 L1 descriptor.
2017 \param [in] xn Section execution-never attribute : EXECUTE , NON_EXECUTE.
2018
2019 \return 0
2020 */
2021 __STATIC_INLINE int MMU_XNSection(uint32_t *descriptor_l1, mmu_execute_Type xn)
2022 {
2023 *descriptor_l1 &= SECTION_XN_MASK;
2024 *descriptor_l1 |= ((xn & 0x1) << SECTION_XN_SHIFT);
2025 return 0;
2026 }
2027
2028 /** \brief Set section domain
2029
2030 \param [out] descriptor_l1 L1 descriptor.
2031 \param [in] domain Section domain
2032
2033 \return 0
2034 */
2035 __STATIC_INLINE int MMU_DomainSection(uint32_t *descriptor_l1, uint8_t domain)
2036 {
2037 *descriptor_l1 &= SECTION_DOMAIN_MASK;
2038 *descriptor_l1 |= ((domain & 0xF) << SECTION_DOMAIN_SHIFT);
2039 return 0;
2040 }
2041
2042 /** \brief Set section parity check
2043
2044 \param [out] descriptor_l1 L1 descriptor.
2045 \param [in] p_bit Parity check: ECC_DISABLED, ECC_ENABLED
2046
2047 \return 0
2048 */
2049 __STATIC_INLINE int MMU_PSection(uint32_t *descriptor_l1, mmu_ecc_check_Type p_bit)
2050 {
2051 *descriptor_l1 &= SECTION_P_MASK;
2052 *descriptor_l1 |= ((p_bit & 0x1) << SECTION_P_SHIFT);
2053 return 0;
2054 }
2055
2056 /** \brief Set section access privileges
2057
2058 \param [out] descriptor_l1 L1 descriptor.
2059 \param [in] user User Level Access: NO_ACCESS, RW, READ
2060 \param [in] priv Privilege Level Access: NO_ACCESS, RW, READ
2061 \param [in] afe Access flag enable
2062
2063 \return 0
2064 */
2065 __STATIC_INLINE int MMU_APSection(uint32_t *descriptor_l1, mmu_access_Type user, mmu_access_Type priv, uint32_t afe)
2066 {
2067 uint32_t ap = 0;
2068
2069 if (afe == 0) { //full access
2070 if ((priv == NO_ACCESS) && (user == NO_ACCESS)) { ap = 0x0; }
2071 else if ((priv == RW) && (user == NO_ACCESS)) { ap = 0x1; }
2072 else if ((priv == RW) && (user == READ)) { ap = 0x2; }
2073 else if ((priv == RW) && (user == RW)) { ap = 0x3; }
2074 else if ((priv == READ) && (user == NO_ACCESS)) { ap = 0x5; }
2075 else if ((priv == READ) && (user == READ)) { ap = 0x7; }
2076 }
2077
2078 else { //Simplified access
2079 if ((priv == RW) && (user == NO_ACCESS)) { ap = 0x1; }
2080 else if ((priv == RW) && (user == RW)) { ap = 0x3; }
2081 else if ((priv == READ) && (user == NO_ACCESS)) { ap = 0x5; }
2082 else if ((priv == READ) && (user == READ)) { ap = 0x7; }
2083 }
2084
2085 *descriptor_l1 &= SECTION_AP_MASK;
2086 *descriptor_l1 |= (ap & 0x3) << SECTION_AP_SHIFT;
2087 *descriptor_l1 |= ((ap & 0x4)>>2) << SECTION_AP2_SHIFT;
2088
2089 return 0;
2090 }
2091
2092 /** \brief Set section shareability
2093
2094 \param [out] descriptor_l1 L1 descriptor.
2095 \param [in] s_bit Section shareability: NON_SHARED, SHARED
2096
2097 \return 0
2098 */
2099 __STATIC_INLINE int MMU_SharedSection(uint32_t *descriptor_l1, mmu_shared_Type s_bit)
2100 {
2101 *descriptor_l1 &= SECTION_S_MASK;
2102 *descriptor_l1 |= ((s_bit & 0x1) << SECTION_S_SHIFT);
2103 return 0;
2104 }
2105
2106 /** \brief Set section Global attribute
2107
2108 \param [out] descriptor_l1 L1 descriptor.
2109 \param [in] g_bit Section attribute: GLOBAL, NON_GLOBAL
2110
2111 \return 0
2112 */
2113 __STATIC_INLINE int MMU_GlobalSection(uint32_t *descriptor_l1, mmu_global_Type g_bit)
2114 {
2115 *descriptor_l1 &= SECTION_NG_MASK;
2116 *descriptor_l1 |= ((g_bit & 0x1) << SECTION_NG_SHIFT);
2117 return 0;
2118 }
2119
2120 /** \brief Set section Security attribute
2121
2122 \param [out] descriptor_l1 L1 descriptor.
2123 \param [in] s_bit Section Security attribute: SECURE, NON_SECURE
2124
2125 \return 0
2126 */
2127 __STATIC_INLINE int MMU_SecureSection(uint32_t *descriptor_l1, mmu_secure_Type s_bit)
2128 {
2129 *descriptor_l1 &= SECTION_NS_MASK;
2130 *descriptor_l1 |= ((s_bit & 0x1) << SECTION_NS_SHIFT);
2131 return 0;
2132 }
2133
2134 /* Page 4k or 64k */
2135 /** \brief Set 4k/64k page execution-never attribute
2136
2137 \param [out] descriptor_l2 L2 descriptor.
2138 \param [in] xn Page execution-never attribute : EXECUTE , NON_EXECUTE.
2139 \param [in] page Page size: PAGE_4k, PAGE_64k,
2140
2141 \return 0
2142 */
2143 __STATIC_INLINE int MMU_XNPage(uint32_t *descriptor_l2, mmu_execute_Type xn, mmu_region_size_Type page)
2144 {
2145 if (page == PAGE_4k)
2146 {
2147 *descriptor_l2 &= PAGE_XN_4K_MASK;
2148 *descriptor_l2 |= ((xn & 0x1) << PAGE_XN_4K_SHIFT);
2149 }
2150 else
2151 {
2152 *descriptor_l2 &= PAGE_XN_64K_MASK;
2153 *descriptor_l2 |= ((xn & 0x1) << PAGE_XN_64K_SHIFT);
2154 }
2155 return 0;
2156 }
2157
2158 /** \brief Set 4k/64k page domain
2159
2160 \param [out] descriptor_l1 L1 descriptor.
2161 \param [in] domain Page domain
2162
2163 \return 0
2164 */
2165 __STATIC_INLINE int MMU_DomainPage(uint32_t *descriptor_l1, uint8_t domain)
2166 {
2167 *descriptor_l1 &= PAGE_DOMAIN_MASK;
2168 *descriptor_l1 |= ((domain & 0xf) << PAGE_DOMAIN_SHIFT);
2169 return 0;
2170 }
2171
2172 /** \brief Set 4k/64k page parity check
2173
2174 \param [out] descriptor_l1 L1 descriptor.
2175 \param [in] p_bit Parity check: ECC_DISABLED, ECC_ENABLED
2176
2177 \return 0
2178 */
2179 __STATIC_INLINE int MMU_PPage(uint32_t *descriptor_l1, mmu_ecc_check_Type p_bit)
2180 {
2181 *descriptor_l1 &= SECTION_P_MASK;
2182 *descriptor_l1 |= ((p_bit & 0x1) << SECTION_P_SHIFT);
2183 return 0;
2184 }
2185
2186 /** \brief Set 4k/64k page access privileges
2187
2188 \param [out] descriptor_l2 L2 descriptor.
2189 \param [in] user User Level Access: NO_ACCESS, RW, READ
2190 \param [in] priv Privilege Level Access: NO_ACCESS, RW, READ
2191 \param [in] afe Access flag enable
2192
2193 \return 0
2194 */
2195 __STATIC_INLINE int MMU_APPage(uint32_t *descriptor_l2, mmu_access_Type user, mmu_access_Type priv, uint32_t afe)
2196 {
2197 uint32_t ap = 0;
2198
2199 if (afe == 0) { //full access
2200 if ((priv == NO_ACCESS) && (user == NO_ACCESS)) { ap = 0x0; }
2201 else if ((priv == RW) && (user == NO_ACCESS)) { ap = 0x1; }
2202 else if ((priv == RW) && (user == READ)) { ap = 0x2; }
2203 else if ((priv == RW) && (user == RW)) { ap = 0x3; }
2204 else if ((priv == READ) && (user == NO_ACCESS)) { ap = 0x5; }
2205 else if ((priv == READ) && (user == READ)) { ap = 0x6; }
2206 }
2207
2208 else { //Simplified access
2209 if ((priv == RW) && (user == NO_ACCESS)) { ap = 0x1; }
2210 else if ((priv == RW) && (user == RW)) { ap = 0x3; }
2211 else if ((priv == READ) && (user == NO_ACCESS)) { ap = 0x5; }
2212 else if ((priv == READ) && (user == READ)) { ap = 0x7; }
2213 }
2214
2215 *descriptor_l2 &= PAGE_AP_MASK;
2216 *descriptor_l2 |= (ap & 0x3) << PAGE_AP_SHIFT;
2217 *descriptor_l2 |= ((ap & 0x4)>>2) << PAGE_AP2_SHIFT;
2218
2219 return 0;
2220 }
2221
2222 /** \brief Set 4k/64k page shareability
2223
2224 \param [out] descriptor_l2 L2 descriptor.
2225 \param [in] s_bit 4k/64k page shareability: NON_SHARED, SHARED
2226
2227 \return 0
2228 */
2229 __STATIC_INLINE int MMU_SharedPage(uint32_t *descriptor_l2, mmu_shared_Type s_bit)
2230 {
2231 *descriptor_l2 &= PAGE_S_MASK;
2232 *descriptor_l2 |= ((s_bit & 0x1) << PAGE_S_SHIFT);
2233 return 0;
2234 }
2235
2236 /** \brief Set 4k/64k page Global attribute
2237
2238 \param [out] descriptor_l2 L2 descriptor.
2239 \param [in] g_bit 4k/64k page attribute: GLOBAL, NON_GLOBAL
2240
2241 \return 0
2242 */
2243 __STATIC_INLINE int MMU_GlobalPage(uint32_t *descriptor_l2, mmu_global_Type g_bit)
2244 {
2245 *descriptor_l2 &= PAGE_NG_MASK;
2246 *descriptor_l2 |= ((g_bit & 0x1) << PAGE_NG_SHIFT);
2247 return 0;
2248 }
2249
2250 /** \brief Set 4k/64k page Security attribute
2251
2252 \param [out] descriptor_l1 L1 descriptor.
2253 \param [in] s_bit 4k/64k page Security attribute: SECURE, NON_SECURE
2254
2255 \return 0
2256 */
2257 __STATIC_INLINE int MMU_SecurePage(uint32_t *descriptor_l1, mmu_secure_Type s_bit)
2258 {
2259 *descriptor_l1 &= PAGE_NS_MASK;
2260 *descriptor_l1 |= ((s_bit & 0x1) << PAGE_NS_SHIFT);
2261 return 0;
2262 }
2263
2264 /** \brief Set Section memory attributes
2265
2266 \param [out] descriptor_l1 L1 descriptor.
2267 \param [in] mem Section memory type: NORMAL, DEVICE, SHARED_DEVICE, NON_SHARED_DEVICE, STRONGLY_ORDERED
2268 \param [in] outer Outer cacheability: NON_CACHEABLE, WB_WA, WT, WB_NO_WA,
2269 \param [in] inner Inner cacheability: NON_CACHEABLE, WB_WA, WT, WB_NO_WA,
2270
2271 \return 0
2272 */
2273 __STATIC_INLINE int MMU_MemorySection(uint32_t *descriptor_l1, mmu_memory_Type mem, mmu_cacheability_Type outer, mmu_cacheability_Type inner)
2274 {
2275 *descriptor_l1 &= SECTION_TEXCB_MASK;
2276
2277 if (STRONGLY_ORDERED == mem)
2278 {
2279 return 0;
2280 }
2281 else if (SHARED_DEVICE == mem)
2282 {
2283 *descriptor_l1 |= (1 << SECTION_B_SHIFT);
2284 }
2285 else if (NON_SHARED_DEVICE == mem)
2286 {
2287 *descriptor_l1 |= (1 << SECTION_TEX1_SHIFT);
2288 }
2289 else if (NORMAL == mem)
2290 {
2291 *descriptor_l1 |= 1 << SECTION_TEX2_SHIFT;
2292 switch(inner)
2293 {
2294 case NON_CACHEABLE:
2295 break;
2296 case WB_WA:
2297 *descriptor_l1 |= (1 << SECTION_B_SHIFT);
2298 break;
2299 case WT:
2300 *descriptor_l1 |= 1 << SECTION_C_SHIFT;
2301 break;
2302 case WB_NO_WA:
2303 *descriptor_l1 |= (1 << SECTION_B_SHIFT) | (1 << SECTION_C_SHIFT);
2304 break;
2305 }
2306 switch(outer)
2307 {
2308 case NON_CACHEABLE:
2309 break;
2310 case WB_WA:
2311 *descriptor_l1 |= (1 << SECTION_TEX0_SHIFT);
2312 break;
2313 case WT:
2314 *descriptor_l1 |= 1 << SECTION_TEX1_SHIFT;
2315 break;
2316 case WB_NO_WA:
2317 *descriptor_l1 |= (1 << SECTION_TEX0_SHIFT) | (1 << SECTION_TEX1_SHIFT);
2318 break;
2319 }
2320 }
2321 return 0;
2322 }
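
/* Worked example (illustrative only): for Normal memory with outer and inner
   write-back/write-allocate, MMU_MemorySection() sets bit 14 (TEX2), bit 12 (TEX0) and
   bit 2 (B), i.e. TEX[2:0] = 0b101 and C:B = 0b01 in the L1 descriptor, which is the
   "cacheable, outer/inner WB-WA" encoding used when TEX remap is disabled.
*/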
2323
2324 /** \brief Set 4k/64k page memory attributes
2325
2326 \param [out] descriptor_l2 L2 descriptor.
2327 \param [in] mem 4k/64k page memory type: NORMAL, DEVICE, SHARED_DEVICE, NON_SHARED_DEVICE, STRONGLY_ORDERED
2328 \param [in] outer Outer cacheability: NON_CACHEABLE, WB_WA, WT, WB_NO_WA,
2329 \param [in] inner Inner cacheability: NON_CACHEABLE, WB_WA, WT, WB_NO_WA,
2330 \param [in] page Page size
2331
2332 \return 0
2333 */
2334 __STATIC_INLINE int MMU_MemoryPage(uint32_t *descriptor_l2, mmu_memory_Type mem, mmu_cacheability_Type outer, mmu_cacheability_Type inner, mmu_region_size_Type page)
2335 {
2336 *descriptor_l2 &= PAGE_4K_TEXCB_MASK;
2337
2338 if (page == PAGE_64k)
2339 {
2340 //same as section
2341 MMU_MemorySection(descriptor_l2, mem, outer, inner);
2342 }
2343 else
2344 {
2345 if (STRONGLY_ORDERED == mem)
2346 {
2347 return 0;
2348 }
2349 else if (SHARED_DEVICE == mem)
2350 {
2351 *descriptor_l2 |= (1 << PAGE_4K_B_SHIFT);
2352 }
2353 else if (NON_SHARED_DEVICE == mem)
2354 {
2355 *descriptor_l2 |= (1 << PAGE_4K_TEX1_SHIFT);
2356 }
2357 else if (NORMAL == mem)
2358 {
2359 *descriptor_l2 |= 1 << PAGE_4K_TEX2_SHIFT;
2360 switch(inner)
2361 {
2362 case NON_CACHEABLE:
2363 break;
2364 case WB_WA:
2365 *descriptor_l2 |= (1 << PAGE_4K_B_SHIFT);
2366 break;
2367 case WT:
2368 *descriptor_l2 |= 1 << PAGE_4K_C_SHIFT;
2369 break;
2370 case WB_NO_WA:
2371 *descriptor_l2 |= (1 << PAGE_4K_B_SHIFT) | (1 << PAGE_4K_C_SHIFT);
2372 break;
2373 }
2374 switch(outer)
2375 {
2376 case NON_CACHEABLE:
2377 break;
2378 case WB_WA:
2379 *descriptor_l2 |= (1 << PAGE_4K_TEX0_SHIFT);
2380 break;
2381 case WT:
2382 *descriptor_l2 |= 1 << PAGE_4K_TEX1_SHIFT;
2383 break;
2384 case WB_NO_WA:
2385 *descriptor_l2 |= (1 << PAGE_4K_TEX0_SHIFT) | (1 << PAGE_4K_TEX1_SHIFT);
2386 break;
2387 }
2388 }
2389 }
2390
2391 return 0;
2392 }
2393
2394 /** \brief Create a L1 section descriptor
2395
2396 \param [out] descriptor L1 descriptor
2397 \param [in] reg Section attributes
2398
2399 \return 0
2400 */
2401 __STATIC_INLINE int MMU_GetSectionDescriptor(uint32_t *descriptor, mmu_region_attributes_Type reg)
2402 {
2403 *descriptor = 0;
2404
2405 MMU_MemorySection(descriptor, reg.mem_t, reg.outer_norm_t, reg.inner_norm_t);
2406 MMU_XNSection(descriptor,reg.xn_t);
2407 MMU_DomainSection(descriptor, reg.domain);
2408 MMU_PSection(descriptor, reg.e_t);
2409 MMU_APSection(descriptor, reg.priv_t, reg.user_t, 1);
2410 MMU_SharedSection(descriptor,reg.sh_t);
2411 MMU_GlobalSection(descriptor,reg.g_t);
2412 MMU_SecureSection(descriptor,reg.sec_t);
2413 *descriptor &= SECTION_MASK;
2414 *descriptor |= SECTION_DESCRIPTOR;
2415
2416 return 0;
2417 }
2418
2419
2420 /** \brief Create a L1 and L2 4k/64k page descriptor
2421
2422 \param [out] descriptor L1 descriptor
2423 \param [out] descriptor2 L2 descriptor
2424 \param [in] reg 4k/64k page attributes
2425
2426 \return 0
2427 */
2428 __STATIC_INLINE int MMU_GetPageDescriptor(uint32_t *descriptor, uint32_t *descriptor2, mmu_region_attributes_Type reg)
2429 {
2430 *descriptor = 0;
2431 *descriptor2 = 0;
2432
2433 switch (reg.rg_t)
2434 {
2435 case PAGE_4k:
2436 MMU_MemoryPage(descriptor2, reg.mem_t, reg.outer_norm_t, reg.inner_norm_t, PAGE_4k);
2437 MMU_XNPage(descriptor2, reg.xn_t, PAGE_4k);
2438 MMU_DomainPage(descriptor, reg.domain);
2439 MMU_PPage(descriptor, reg.e_t);
2440 MMU_APPage(descriptor2, reg.priv_t, reg.user_t, 1);
2441 MMU_SharedPage(descriptor2,reg.sh_t);
2442 MMU_GlobalPage(descriptor2,reg.g_t);
2443 MMU_SecurePage(descriptor,reg.sec_t);
2444 *descriptor &= PAGE_L1_MASK;
2445 *descriptor |= PAGE_L1_DESCRIPTOR;
2446 *descriptor2 &= PAGE_L2_4K_MASK;
2447 *descriptor2 |= PAGE_L2_4K_DESC;
2448 break;
2449
2450 case PAGE_64k:
2451 MMU_MemoryPage(descriptor2, reg.mem_t, reg.outer_norm_t, reg.inner_norm_t, PAGE_64k);
2452 MMU_XNPage(descriptor2, reg.xn_t, PAGE_64k);
2453 MMU_DomainPage(descriptor, reg.domain);
2454 MMU_PPage(descriptor, reg.e_t);
2455 MMU_APPage(descriptor2, reg.priv_t, reg.user_t, 1);
2456 MMU_SharedPage(descriptor2,reg.sh_t);
2457 MMU_GlobalPage(descriptor2,reg.g_t);
2458 MMU_SecurePage(descriptor,reg.sec_t);
2459 *descriptor &= PAGE_L1_MASK;
2460 *descriptor |= PAGE_L1_DESCRIPTOR;
2461 *descriptor2 &= PAGE_L2_64K_MASK;
2462 *descriptor2 |= PAGE_L2_64K_DESC;
2463 break;
2464
2465 case SECTION:
2466 //error
2467 break;
2468 }
2469
2470 return 0;
2471 }
2472
2473 /** \brief Create a 1MB Section
2474
2475 \param [in] ttb Translation table base address
2476 \param [in] base_address Section base address
2477 \param [in] count Number of sections to create
2478 \param [in] descriptor_l1 L1 descriptor (region attributes)
2479
2480 */
2481 __STATIC_INLINE void MMU_TTSection(uint32_t *ttb, uint32_t base_address, uint32_t count, uint32_t descriptor_l1)
2482 {
2483 uint32_t offset;
2484 uint32_t entry;
2485 uint32_t i;
2486
2487 offset = base_address >> 20;
2488 entry = (base_address & 0xFFF00000) | descriptor_l1;
2489
2490 //4 bytes aligned
2491 ttb = ttb + offset;
2492
2493 for (i = 0; i < count; i++ )
2494 {
2495 //4 bytes aligned
2496 *ttb++ = entry;
2497 entry += OFFSET_1M;
2498 }
2499 }
2500
2501 /** \brief Create a 4k page entry
2502
2503 \param [in] ttb L1 table base address
2504 \param [in] base_address 4k base address
2505 \param [in] count Number of 4k pages to create
2506 \param [in] descriptor_l1 L1 descriptor (region attributes)
2507 \param [in] ttb_l2 L2 table base address
2508 \param [in] descriptor_l2 L2 descriptor (region attributes)
2509
2510 */
2511 __STATIC_INLINE void MMU_TTPage4k(uint32_t *ttb, uint32_t base_address, uint32_t count, uint32_t descriptor_l1, uint32_t *ttb_l2, uint32_t descriptor_l2)
2512 {
2513
2514 uint32_t offset, offset2;
2515 uint32_t entry, entry2;
2516 uint32_t i;
2517
2518 offset = base_address >> 20;
2519 entry = ((int)ttb_l2 & 0xFFFFFC00) | descriptor_l1;
2520
2521 //4 bytes aligned
2522 ttb += offset;
2523 //create l1_entry
2524 *ttb = entry;
2525
2526 offset2 = (base_address & 0xff000) >> 12;
2527 ttb_l2 += offset2;
2528 entry2 = (base_address & 0xFFFFF000) | descriptor_l2;
2529 for (i = 0; i < count; i++ )
2530 {
2531 //4 bytes aligned
2532 *ttb_l2++ = entry2;
2533 entry2 += OFFSET_4K;
2534 }
2535 }
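
/* Usage sketch (illustrative only): mapping 4 KB pages needs both an L1 entry pointing
   at a 1 KB-aligned L2 table and the L2 page descriptors themselves. The table symbols
   and the peripheral base address below are placeholders a real project's linker script
   and memory map would provide.

   extern uint32_t ttb_l1[4096];              // 16 KB L1 table, 16 KB aligned
   extern uint32_t ttb_l2_periph[256];        // 1 KB L2 table, 1 KB aligned

   void example_map_peripheral_pages(void)
   {
     uint32_t descriptor_l1, descriptor_l2;
     mmu_region_attributes_Type region;

     page4k_device_rw(descriptor_l1, descriptor_l2, region);
     // map 16 pages (64 KB) of a hypothetical peripheral block at 0x40000000
     MMU_TTPage4k(ttb_l1, 0x40000000U, 16U, descriptor_l1, ttb_l2_periph, descriptor_l2);
   }
*/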
2536
2537 /** \brief Create a 64k page entry
2538
2539 \param [in] ttb L1 table base address
2540 \param [in] base_address 64k base address
2541 \param [in] count Number of 64k pages to create
2542 \param [in] descriptor_l1 L1 descriptor (region attributes)
2543 \param [in] ttb_l2 L2 table base address
2544 \param [in] descriptor_l2 L2 descriptor (region attributes)
2545
2546 */
2547 __STATIC_INLINE void MMU_TTPage64k(uint32_t *ttb, uint32_t base_address, uint32_t count, uint32_t descriptor_l1, uint32_t *ttb_l2, uint32_t descriptor_l2)
2548 {
2549 uint32_t offset, offset2;
2550 uint32_t entry, entry2;
2551 uint32_t i,j;
2552
2553
2554 offset = base_address >> 20;
2555 entry = ((int)ttb_l2 & 0xFFFFFC00) | descriptor_l1;
2556
2557 //4 bytes aligned
2558 ttb += offset;
2559 //create l1_entry
2560 *ttb = entry;
2561
2562 offset2 = (base_address & 0xff000) >> 12;
2563 ttb_l2 += offset2;
2564 entry2 = (base_address & 0xFFFF0000) | descriptor_l2;
2565 for (i = 0; i < count; i++ )
2566 {
2567 //create 16 entries
2568 for (j = 0; j < 16; j++)
2569 {
2570 //4 bytes aligned
2571 *ttb_l2++ = entry2;
2572 }
2573 entry2 += OFFSET_64K;
2574 }
2575 }
2576
2577 /** \brief Enable MMU
2578 */
2579 __STATIC_INLINE void MMU_Enable(void)
2580 {
2581 // Set M bit 0 to enable the MMU
2582 // Set AFE bit to enable simplified access permissions model
2583 // Clear TRE bit to disable TEX remap and A bit to disable strict alignment fault checking
2584 __set_SCTLR( (__get_SCTLR() & ~(1 << 28) & ~(1 << 1)) | 1 | (1 << 29));
2585 __ISB();
2586 }
2587
2588 /** \brief Disable MMU
2589 */
2590 __STATIC_INLINE void MMU_Disable(void)
2591 {
2592 // Clear M bit 0 to disable the MMU
2593 __set_SCTLR( __get_SCTLR() & ~1);
2594 __ISB();
2595 }
2596
2597 /** \brief Invalidate entire unified TLB
2598 */
2599
2600 __STATIC_INLINE void MMU_InvalidateTLB(void)
2601 {
2602 __set_TLBIALL(0);
2603 __DSB(); //ensure completion of the invalidation
2604 __ISB(); //ensure instruction fetch path sees new state
2605 }
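
/* Usage sketch (illustrative only): minimal flat-mapped MMU bring-up using the helpers
   above. __set_TTBR0() and __set_DACR() are CP15 accessors assumed to be provided by the
   compiler-specific CMSIS headers included with this file; the translation table symbol
   and the DRAM window are placeholders.

   extern uint32_t ttb_l1[4096];              // 16 KB aligned L1 translation table

   void example_mmu_enable(void)
   {
     uint32_t Sect_Normal, Sect_Device_RW;
     mmu_region_attributes_Type region;

     section_normal(Sect_Normal, region);
     section_device_rw(Sect_Device_RW, region);

     MMU_TTSection(ttb_l1, 0x00000000U, 4096U, Sect_Device_RW);  // default: device memory
     MMU_TTSection(ttb_l1, 0x80000000U,  512U, Sect_Normal);     // placeholder DRAM window

     __set_TTBR0((uint32_t)ttb_l1);           // translation table base
     __set_DACR(1U);                          // domain 0 = client
     MMU_InvalidateTLB();
     MMU_Enable();
   }
*/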
2606
2607
2608 #ifdef __cplusplus
2609 }
2610 #endif
2611
2612 #endif /* __CORE_CA_H_DEPENDANT */
2613
2614 #endif /* __CMSIS_GENERIC */
2615