/*
 * Copyright (c) 2016-2019, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#ifndef SMCCC_HELPERS_H
#define SMCCC_HELPERS_H

#include <lib/smccc.h>

/* These are offsets to registers in smc_ctx_t */
#define SMC_CTX_GPREG_R0	U(0x0)
#define SMC_CTX_GPREG_R1	U(0x4)
#define SMC_CTX_GPREG_R2	U(0x8)
#define SMC_CTX_GPREG_R3	U(0xC)
#define SMC_CTX_GPREG_R4	U(0x10)
#define SMC_CTX_GPREG_R5	U(0x14)
#define SMC_CTX_SP_USR		U(0x34)
#define SMC_CTX_SPSR_MON	U(0x78)
#define SMC_CTX_SP_MON		U(0x7C)
#define SMC_CTX_LR_MON		U(0x80)
#define SMC_CTX_SCR		U(0x84)
#define SMC_CTX_PMCR		U(0x88)
#define SMC_CTX_SIZE		U(0x90)

#ifndef __ASSEMBLER__

#include <stdint.h>

#include <lib/cassert.h>

/*
 * The generic structure used to save the SMC arguments and callee-saved
 * registers during an SMC. This structure is also used to store the return
 * values once the SMC service has completed.
 */
typedef struct smc_ctx {
	u_register_t r0;
	u_register_t r1;
	u_register_t r2;
	u_register_t r3;
	u_register_t r4;
	u_register_t r5;
	u_register_t r6;
	u_register_t r7;
	u_register_t r8;
	u_register_t r9;
	u_register_t r10;
	u_register_t r11;
	u_register_t r12;
	/* spsr_usr doesn't exist */
	u_register_t sp_usr;
	u_register_t lr_usr;
	u_register_t spsr_irq;
	u_register_t sp_irq;
	u_register_t lr_irq;
	u_register_t spsr_fiq;
	u_register_t sp_fiq;
	u_register_t lr_fiq;
	u_register_t spsr_svc;
	u_register_t sp_svc;
	u_register_t lr_svc;
	u_register_t spsr_abt;
	u_register_t sp_abt;
	u_register_t lr_abt;
	u_register_t spsr_und;
	u_register_t sp_und;
	u_register_t lr_und;
	u_register_t spsr_mon;
	/*
	 * `sp_mon` points to the C runtime stack in monitor mode. However,
	 * prior to exiting from the SMC it is made to point to this
	 * `smc_ctx_t`, so that the context can be accessed easily on the
	 * next SMC entry.
	 */
	u_register_t sp_mon;
	u_register_t lr_mon;
	u_register_t scr;
	u_register_t pmcr;
	/*
	 * The workaround for CVE-2017-5715 requires storing information in
	 * the bottom 3 bits of the stack pointer. Add a padding field to
	 * force the size of the struct to be a multiple of 8.
	 */
	u_register_t pad;
} smc_ctx_t __aligned(8);
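
/*
 * Illustrative sketch (an assumption, not part of this interface): the opaque
 * handle passed to an SMC handler is a pointer to the smc_ctx_t saved on
 * entry to monitor mode, so the saved function identifier and arguments can
 * be read by casting it, e.g.
 *
 *	smc_ctx_t *ctx = (smc_ctx_t *)handle;
 *	u_register_t smc_fid = ctx->r0;
 *	u_register_t arg1 = ctx->r1;
 */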

/*
 * Compile-time assertions related to the 'smc_context' structure to ensure
 * that the assembler's and the compiler's views of the structure member
 * offsets are the same.
 */
CASSERT(SMC_CTX_GPREG_R0 == __builtin_offsetof(smc_ctx_t, r0), \
	assert_smc_ctx_greg_r0_offset_mismatch);
CASSERT(SMC_CTX_GPREG_R1 == __builtin_offsetof(smc_ctx_t, r1), \
	assert_smc_ctx_greg_r1_offset_mismatch);
CASSERT(SMC_CTX_GPREG_R2 == __builtin_offsetof(smc_ctx_t, r2), \
	assert_smc_ctx_greg_r2_offset_mismatch);
CASSERT(SMC_CTX_GPREG_R3 == __builtin_offsetof(smc_ctx_t, r3), \
	assert_smc_ctx_greg_r3_offset_mismatch);
CASSERT(SMC_CTX_GPREG_R4 == __builtin_offsetof(smc_ctx_t, r4), \
	assert_smc_ctx_greg_r4_offset_mismatch);
CASSERT(SMC_CTX_SP_USR == __builtin_offsetof(smc_ctx_t, sp_usr), \
	assert_smc_ctx_sp_usr_offset_mismatch);
CASSERT(SMC_CTX_LR_MON == __builtin_offsetof(smc_ctx_t, lr_mon), \
	assert_smc_ctx_lr_mon_offset_mismatch);
CASSERT(SMC_CTX_SPSR_MON == __builtin_offsetof(smc_ctx_t, spsr_mon), \
	assert_smc_ctx_spsr_mon_offset_mismatch);

CASSERT((sizeof(smc_ctx_t) & 0x7U) == 0U, assert_smc_ctx_not_aligned);
CASSERT(SMC_CTX_SIZE == sizeof(smc_ctx_t), assert_smc_ctx_size_mismatch);

/* Convenience macros to return from SMC handler */
#define SMC_RET0(_h) {				\
	return (uintptr_t)(_h);			\
}
#define SMC_RET1(_h, _r0) {			\
	((smc_ctx_t *)(_h))->r0 = (_r0);	\
	SMC_RET0(_h);				\
}
#define SMC_RET2(_h, _r0, _r1) {		\
	((smc_ctx_t *)(_h))->r1 = (_r1);	\
	SMC_RET1(_h, (_r0));			\
}
#define SMC_RET3(_h, _r0, _r1, _r2) {		\
	((smc_ctx_t *)(_h))->r2 = (_r2);	\
	SMC_RET2(_h, (_r0), (_r1));		\
}
#define SMC_RET4(_h, _r0, _r1, _r2, _r3) {	\
	((smc_ctx_t *)(_h))->r3 = (_r3);	\
	SMC_RET3(_h, (_r0), (_r1), (_r2));	\
}
#define SMC_RET5(_h, _r0, _r1, _r2, _r3, _r4) {	\
	((smc_ctx_t *)(_h))->r4 = (_r4);	\
	SMC_RET4(_h, (_r0), (_r1), (_r2), (_r3));	\
}
#define SMC_RET6(_h, _r0, _r1, _r2, _r3, _r4, _r5) {	\
	((smc_ctx_t *)(_h))->r5 = (_r5);	\
	SMC_RET5(_h, (_r0), (_r1), (_r2), (_r3), (_r4));	\
}
#define SMC_RET7(_h, _r0, _r1, _r2, _r3, _r4, _r5, _r6) {	\
	((smc_ctx_t *)(_h))->r6 = (_r6);	\
	SMC_RET6(_h, (_r0), (_r1), (_r2), (_r3), (_r4), (_r5));	\
}
#define SMC_RET8(_h, _r0, _r1, _r2, _r3, _r4, _r5, _r6, _r7) {	\
	((smc_ctx_t *)(_h))->r7 = (_r7);	\
	SMC_RET7(_h, (_r0), (_r1), (_r2), (_r3), (_r4), (_r5), (_r6));	\
}
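
/*
 * Usage sketch (illustrative only; `handle` and `result` are hypothetical
 * local variables): a handler completes an SMC by returning through one of
 * the SMC_RETx macros, which write the return values into the saved context
 * and hand the context pointer back to the exit path, e.g.
 *
 *	SMC_RET2(handle, SMC_OK, result);
 *
 * An unrecognised SMC would typically be completed with
 * SMC_RET1(handle, SMC_UNK).
 */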

/*
 * Helper macro to retrieve the SMC parameters from smc_ctx_t.
 */
#define get_smc_params_from_ctx(_hdl, _r1, _r2, _r3, _r4) {	\
		_r1 = ((smc_ctx_t *)_hdl)->r1;		\
		_r2 = ((smc_ctx_t *)_hdl)->r2;		\
		_r3 = ((smc_ctx_t *)_hdl)->r3;		\
		_r4 = ((smc_ctx_t *)_hdl)->r4;		\
		}
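
/*
 * Usage sketch (illustrative only; `handle` and the argument variables are
 * hypothetical locals): inside a handler, the four SMC arguments can be
 * pulled out of the saved context in one go, e.g.
 *
 *	u_register_t arg1, arg2, arg3, arg4;
 *
 *	get_smc_params_from_ctx(handle, arg1, arg2, arg3, arg4);
 */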

/* ------------------------------------------------------------------------
 * Helper APIs for setting and retrieving the appropriate `smc_ctx_t`.
 * These functions need to be implemented by the BL image including this
 * library.
 * ------------------------------------------------------------------------
 */

/* Get the pointer to `smc_ctx_t` corresponding to the security state. */
void *smc_get_ctx(unsigned int security_state);

/* Set the next `smc_ctx_t` corresponding to the security state. */
void smc_set_next_ctx(unsigned int security_state);

/* Get the pointer to the next `smc_ctx_t` already set by `smc_set_next_ctx()`. */
void *smc_get_next_ctx(void);
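
/*
 * Implementation sketch (an assumption, not mandated by this header; ignores
 * per-CPU handling for brevity): a BL image including this library could keep
 * one `smc_ctx_t` per security state and back these functions with that
 * storage, broadly as follows. The array and variable names are hypothetical.
 *
 *	static smc_ctx_t bl_smc_context[2];
 *	static unsigned int next_security_state;
 *
 *	void *smc_get_ctx(unsigned int security_state)
 *	{
 *		return &bl_smc_context[security_state];
 *	}
 *
 *	void smc_set_next_ctx(unsigned int security_state)
 *	{
 *		next_security_state = security_state;
 *	}
 *
 *	void *smc_get_next_ctx(void)
 *	{
 *		return &bl_smc_context[next_security_state];
 *	}
 */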

#endif /*__ASSEMBLER__*/

#endif /* SMCCC_HELPERS_H */