/*
 * Copyright (c) 2019-2020 LK Trusty Authors. All Rights Reserved.
 * Copyright (c) 2022, Arm Limited. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files
 * (the "Software"), to deal in the Software without restriction,
 * including without limitation the rights to use, copy, modify, merge,
 * publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so,
 * subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
 * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#define LOCAL_TRACE 0

#include <assert.h>
#include <err.h>
#include <interface/arm_ffa/arm_ffa.h>
#include <inttypes.h>
#include <kernel/mutex.h>
#include <kernel/vm.h>
#include <lib/arm_ffa/arm_ffa.h>
#include <lib/smc/smc.h>
#include <lk/init.h>
#include <lk/macros.h>
#include <string.h>
#include <sys/types.h>
#include <trace.h>

static bool arm_ffa_init_is_success = false;
static uint16_t ffa_local_id;
static size_t ffa_buf_size;
static void* ffa_tx;
static void* ffa_rx;
static bool supports_ns_bit = false;
static bool supports_rx_release = false;
static bool console_log_is_unsupported;

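/*
 * Serializes use of the FF-A RX/TX buffers (ffa_tx/ffa_rx) and the SMCs
 * that operate on them.
 */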
static mutex_t ffa_rxtx_buffer_lock = MUTEX_INITIAL_VALUE(ffa_rxtx_buffer_lock);

bool arm_ffa_is_init(void) {
    return arm_ffa_init_is_success;
}

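/* Query our own FF-A partition id via FFA_ID_GET; the id is returned in r2. */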
static status_t arm_ffa_call_id_get(uint16_t* id) {
    struct smc_ret8 smc_ret;

    smc_ret = smc8(SMC_FC_FFA_ID_GET, 0, 0, 0, 0, 0, 0, 0);

    switch (smc_ret.r0) {
    case SMC_FC_FFA_SUCCESS:
    case SMC_FC64_FFA_SUCCESS:
        if (smc_ret.r2 & ~0xFFFFUL) {
            TRACEF("Unexpected FFA_ID_GET result: %lx\n", smc_ret.r2);
            return ERR_NOT_VALID;
        }
        *id = (uint16_t)(smc_ret.r2 & 0xFFFF);
        return NO_ERROR;

    case SMC_FC_FFA_ERROR:
        if (smc_ret.r2 == (ulong)FFA_ERROR_NOT_SUPPORTED) {
            return ERR_NOT_SUPPORTED;
        } else {
            TRACEF("Unexpected FFA_ERROR: %lx\n", smc_ret.r2);
            return ERR_NOT_VALID;
        }

    default:
        TRACEF("Unexpected FFA SMC: %lx\n", smc_ret.r0);
        return ERR_NOT_VALID;
    }
}

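/*
 * Negotiate the FF-A interface version: pass the version we support and
 * read back the version reported in return.
 */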
static status_t arm_ffa_call_version(uint16_t major,
                                     uint16_t minor,
                                     uint16_t* major_ret,
                                     uint16_t* minor_ret) {
    struct smc_ret8 smc_ret;

    uint32_t version = FFA_VERSION(major, minor);
    /* Bit 31 must be cleared. */
    ASSERT(!(version >> 31));
    smc_ret = smc8(SMC_FC_FFA_VERSION, version, 0, 0, 0, 0, 0, 0);
    if (smc_ret.r0 == (ulong)FFA_ERROR_NOT_SUPPORTED) {
        return ERR_NOT_SUPPORTED;
    }
    *major_ret = FFA_VERSION_TO_MAJOR(smc_ret.r0);
    *minor_ret = FFA_VERSION_TO_MINOR(smc_ret.r0);

    return NO_ERROR;
}

/* TODO: Add feature ids when adding support for FF-A version 1.1. */
static status_t arm_ffa_call_features(ulong id,
                                      bool* is_implemented,
                                      ffa_features2_t* features2,
                                      ffa_features3_t* features3) {
    struct smc_ret8 smc_ret;

    ASSERT(is_implemented);

    /*
     * According to the FF-A spec section "Discovery of NS bit usage",
     * NS_BIT is optionally set by a v1.0 SP such as Trusty, and must
     * be set by a v1.1+ SP. Here, we set it unconditionally for the
     * relevant feature.
     */
    bool request_ns_bit = (id == SMC_FC_FFA_MEM_RETRIEVE_REQ) ||
                          (id == SMC_FC64_FFA_MEM_RETRIEVE_REQ);
    smc_ret = smc8(SMC_FC_FFA_FEATURES, id,
                   request_ns_bit ? FFA_FEATURES2_MEM_RETRIEVE_REQ_NS_BIT : 0,
                   0, 0, 0, 0, 0);

    switch (smc_ret.r0) {
    case SMC_FC_FFA_SUCCESS:
    case SMC_FC64_FFA_SUCCESS:
        *is_implemented = true;
        if (features2) {
            *features2 = (ffa_features2_t)smc_ret.r2;
        }
        if (features3) {
            *features3 = (ffa_features3_t)smc_ret.r3;
        }
        return NO_ERROR;

    case SMC_FC_FFA_ERROR:
        if (smc_ret.r2 == (ulong)FFA_ERROR_NOT_SUPPORTED) {
            *is_implemented = false;
            return NO_ERROR;
        } else {
            TRACEF("Unexpected FFA_ERROR: %lx\n", smc_ret.r2);
            return ERR_NOT_VALID;
        }

    default:
        TRACEF("Unexpected FFA SMC: %lx\n", smc_ret.r0);
        return ERR_NOT_VALID;
    }
}

/*
 * Call with ffa_rxtx_buffer_lock acquired and the ffa_tx buffer already
 * populated with struct ffa_mtd. Transmit in a single fragment.
 */
static status_t arm_ffa_call_mem_retrieve_req(uint32_t* total_len,
                                              uint32_t* fragment_len) {
    struct smc_ret8 smc_ret;
    struct ffa_mtd* req = ffa_tx;
    size_t len;

    DEBUG_ASSERT(is_mutex_held(&ffa_rxtx_buffer_lock));

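    /* Request size: the MTD header plus one EMAD per receiver. */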
    len = offsetof(struct ffa_mtd, emad[0]) +
          req->emad_count * sizeof(struct ffa_emad);

    smc_ret = smc8(SMC_FC_FFA_MEM_RETRIEVE_REQ, len, len, 0, 0, 0, 0, 0);

    long error;
    switch (smc_ret.r0) {
    case SMC_FC_FFA_MEM_RETRIEVE_RESP:
        if (total_len) {
            *total_len = (uint32_t)smc_ret.r1;
        }
        if (fragment_len) {
            *fragment_len = (uint32_t)smc_ret.r2;
        }
        return NO_ERROR;
    case SMC_FC_FFA_ERROR:
        error = (long)smc_ret.r2;
        switch (error) {
        case FFA_ERROR_NOT_SUPPORTED:
            return ERR_NOT_SUPPORTED;
        case FFA_ERROR_INVALID_PARAMETERS:
            return ERR_INVALID_ARGS;
        case FFA_ERROR_NO_MEMORY:
            return ERR_NO_MEMORY;
        case FFA_ERROR_DENIED:
            return ERR_BAD_STATE;
        case FFA_ERROR_ABORTED:
            return ERR_CANCELLED;
        default:
            TRACEF("Unknown error: 0x%lx\n", error);
            return ERR_NOT_VALID;
        }
    default:
        return ERR_NOT_VALID;
    }
}

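/*
 * Fetch the next fragment of a memory retrieve response into the RX buffer;
 * "offset" is the number of bytes of the response received so far.
 */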
static status_t arm_ffa_call_mem_frag_rx(uint64_t handle,
                                         uint32_t offset,
                                         uint32_t* fragment_len) {
    struct smc_ret8 smc_ret;

    DEBUG_ASSERT(is_mutex_held(&ffa_rxtx_buffer_lock));

    smc_ret = smc8(SMC_FC_FFA_MEM_FRAG_RX, (uint32_t)handle, handle >> 32,
                   offset, 0, 0, 0, 0);

    /* FRAG_RX is followed by FRAG_TX on successful completion. */
    switch (smc_ret.r0) {
    case SMC_FC_FFA_MEM_FRAG_TX: {
        uint64_t handle_out = smc_ret.r1 + ((uint64_t)smc_ret.r2 << 32);
        if (handle != handle_out) {
            TRACEF("Handle for response doesn't match the request, %" PRId64
                   " != %" PRId64 "\n",
                   handle, handle_out);
            return ERR_NOT_VALID;
        }
        *fragment_len = smc_ret.r3;
        return NO_ERROR;
    }
    case SMC_FC_FFA_ERROR:
        switch ((int)smc_ret.r2) {
        case FFA_ERROR_NOT_SUPPORTED:
            return ERR_NOT_SUPPORTED;
        case FFA_ERROR_INVALID_PARAMETERS:
            return ERR_INVALID_ARGS;
        case FFA_ERROR_ABORTED:
            return ERR_CANCELLED;
        default:
            TRACEF("Unexpected error %d\n", (int)smc_ret.r2);
            return ERR_NOT_VALID;
        }
    default:
        TRACEF("Unexpected function id returned 0x%08lx\n", smc_ret.r0);
        return ERR_NOT_VALID;
    }
}

static status_t arm_ffa_call_mem_relinquish(
        uint64_t handle,
        uint32_t flags,
        uint32_t endpoint_count,
        const ffa_endpoint_id16_t* endpoints) {
    struct smc_ret8 smc_ret;
    struct ffa_mem_relinquish_descriptor* req = ffa_tx;

    if (!req) {
        TRACEF("ERROR: no FF-A tx buffer\n");
        return ERR_NOT_CONFIGURED;
    }
    ASSERT(endpoint_count <=
           (ffa_buf_size - sizeof(struct ffa_mem_relinquish_descriptor)) /
                   sizeof(ffa_endpoint_id16_t));

    mutex_acquire(&ffa_rxtx_buffer_lock);

    req->handle = handle;
    req->flags = flags;
    req->endpoint_count = endpoint_count;

    memcpy(req->endpoint_array, endpoints,
           endpoint_count * sizeof(ffa_endpoint_id16_t));

    smc_ret = smc8(SMC_FC_FFA_MEM_RELINQUISH, 0, 0, 0, 0, 0, 0, 0);

    mutex_release(&ffa_rxtx_buffer_lock);

    switch (smc_ret.r0) {
    case SMC_FC_FFA_SUCCESS:
    case SMC_FC64_FFA_SUCCESS:
        return NO_ERROR;

    case SMC_FC_FFA_ERROR:
        switch ((int)smc_ret.r2) {
        case FFA_ERROR_NOT_SUPPORTED:
            return ERR_NOT_SUPPORTED;
        case FFA_ERROR_INVALID_PARAMETERS:
            return ERR_INVALID_ARGS;
        case FFA_ERROR_NO_MEMORY:
            return ERR_NO_MEMORY;
        case FFA_ERROR_DENIED:
            return ERR_BAD_STATE;
        case FFA_ERROR_ABORTED:
            return ERR_CANCELLED;
        default:
            TRACEF("Unexpected FFA_ERROR: %lx\n", smc_ret.r2);
            return ERR_NOT_VALID;
        }
    default:
        TRACEF("Unexpected FFA SMC: %lx\n", smc_ret.r0);
        return ERR_NOT_VALID;
    }
}

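/*
 * Register the TX/RX buffer pair with the SPMC. page_count is given in
 * FFA_PAGE_SIZE units.
 */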
static status_t arm_ffa_call_rxtx_map(paddr_t tx_paddr,
                                      paddr_t rx_paddr,
                                      size_t page_count) {
    struct smc_ret8 smc_ret;

    /* Page count specified in bits [0:5] */
    ASSERT(page_count);
    ASSERT(page_count < (1 << 6));

#if ARCH_ARM64
    smc_ret = smc8(SMC_FC64_FFA_RXTX_MAP, tx_paddr, rx_paddr, page_count, 0, 0,
                   0, 0);
#else
    smc_ret = smc8(SMC_FC_FFA_RXTX_MAP, tx_paddr, rx_paddr, page_count, 0, 0, 0,
                   0);
#endif
    switch (smc_ret.r0) {
    case SMC_FC_FFA_SUCCESS:
    case SMC_FC64_FFA_SUCCESS:
        return NO_ERROR;

    case SMC_FC_FFA_ERROR:
        switch ((int)smc_ret.r2) {
        case FFA_ERROR_NOT_SUPPORTED:
            return ERR_NOT_SUPPORTED;
        case FFA_ERROR_INVALID_PARAMETERS:
            return ERR_INVALID_ARGS;
        case FFA_ERROR_NO_MEMORY:
            return ERR_NO_MEMORY;
        case FFA_ERROR_DENIED:
            return ERR_ALREADY_EXISTS;
        default:
            TRACEF("Unexpected FFA_ERROR: %lx\n", smc_ret.r2);
            return ERR_NOT_VALID;
        }
    default:
        TRACEF("Unexpected FFA SMC: %lx\n", smc_ret.r0);
        return ERR_NOT_VALID;
    }
}

static status_t arm_ffa_call_rx_release(void) {
    struct smc_ret8 smc_ret;

    DEBUG_ASSERT(is_mutex_held(&ffa_rxtx_buffer_lock));

    smc_ret = smc8(SMC_FC_FFA_RX_RELEASE, 0, 0, 0, 0, 0, 0, 0);
    switch (smc_ret.r0) {
    case SMC_FC_FFA_SUCCESS:
    case SMC_FC64_FFA_SUCCESS:
        return NO_ERROR;

    case SMC_FC_FFA_ERROR:
        switch ((int)smc_ret.r2) {
        case FFA_ERROR_NOT_SUPPORTED:
            return ERR_NOT_SUPPORTED;
        case FFA_ERROR_DENIED:
            return ERR_BAD_STATE;
        default:
            return ERR_NOT_VALID;
        }
    default:
        return ERR_NOT_VALID;
    }
}

#if WITH_SMP
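/*
 * Register the kernel entry point (_start) with the SPMC as the entry
 * point for secondary cores.
 */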
static status_t ffa_call_secondary_ep_register(void) {
    struct smc_ret8 smc_ret;
    paddr_t secondary_ep_paddr;
    extern char _start[];

    secondary_ep_paddr = vaddr_to_paddr(_start);

    smc_ret = smc8(SMC_FC64_FFA_SECONDARY_EP_REGISTER, secondary_ep_paddr, 0, 0,
                   0, 0, 0, 0);
    switch ((uint32_t)smc_ret.r0) {
    case SMC_FC_FFA_SUCCESS:
    case SMC_FC64_FFA_SUCCESS:
        return NO_ERROR;

    case SMC_FC_FFA_ERROR:
        switch ((int)smc_ret.r2) {
        case FFA_ERROR_NOT_SUPPORTED:
            return ERR_NOT_SUPPORTED;
        case FFA_ERROR_INVALID_PARAMETERS:
            return ERR_INVALID_ARGS;
        default:
            return ERR_NOT_VALID;
        }

    case SMC_UNKNOWN:
        return ERR_NOT_SUPPORTED;

    default:
        return ERR_NOT_VALID;
    }
}
#endif /* WITH_SMP */

struct smc_ret8 arm_ffa_call_error(enum ffa_error err) {
    long target = 0; /* Target must be zero (MBZ) at secure FF-A instances */
    return smc8(SMC_FC_FFA_ERROR, target, (ulong)err, 0, 0, 0, 0, 0);
}

struct smc_ret8 arm_ffa_call_msg_wait(void) {
    return smc8(SMC_FC_FFA_MSG_WAIT, 0, 0, 0, 0, 0, 0, 0);
}

struct smc_ret8 arm_ffa_msg_send_direct_resp(
        const struct smc_ret8* direct_req_regs,
        ulong a0,
        ulong a1,
        ulong a2,
        ulong a3,
        ulong a4) {
    ulong fid;
    uint32_t sender_receiver_id;
    uint32_t flags;

    DEBUG_ASSERT(direct_req_regs);
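    /* Respond with the 32- or 64-bit ABI that matches the incoming request. */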
    switch (direct_req_regs->r0) {
    case SMC_FC_FFA_MSG_SEND_DIRECT_REQ:
        fid = SMC_FC_FFA_MSG_SEND_DIRECT_RESP;
        break;
    case SMC_FC64_FFA_MSG_SEND_DIRECT_REQ:
        fid = SMC_FC64_FFA_MSG_SEND_DIRECT_RESP;
        break;
    default:
        dprintf(CRITICAL, "Invalid direct request function id %lx\n",
                direct_req_regs->r0);
        return arm_ffa_call_error(FFA_ERROR_INVALID_PARAMETERS);
    }

    /* Swap roles: the request's sender becomes the receiver of our response */
    sender_receiver_id =
            (direct_req_regs->r1 >> 16) | ((uint32_t)ffa_local_id << 16);
    /* Copy the flags as well */
    flags = direct_req_regs->r2;

    return smc8(fid, sender_receiver_id, flags, a0, a1, a2, a3, a4);
}

ssize_t arm_ffa_console_log(const char* buf, size_t len) {
    struct smc_ret8 smc_ret;

    if (console_log_is_unsupported) {
        return ERR_NOT_SUPPORTED;
    }
    if (!len) {
        /* Nothing to print, just return */
        return 0;
    }
    if (len != 1) {
        /* TODO: support more than one character */
        len = 1;
    }

    smc_ret = smc8(SMC_FC_FFA_CONSOLE_LOG, len, (ulong)buf[0], 0, 0, 0, 0, 0);
    switch ((uint32_t)smc_ret.r0) {
    case SMC_FC_FFA_SUCCESS:
    case SMC_FC64_FFA_SUCCESS:
        return len;

    case SMC_FC_FFA_ERROR:
        switch ((int32_t)smc_ret.r2) {
        case FFA_ERROR_NOT_SUPPORTED:
            console_log_is_unsupported = true;
            return ERR_NOT_SUPPORTED;
        case FFA_ERROR_INVALID_PARAMETERS:
            return ERR_INVALID_ARGS;
        case FFA_ERROR_RETRY:
            /* FFA_ERROR_RETRY returns how many characters were printed */
            return (uint32_t)smc_ret.r3;
        default:
            return ERR_NOT_VALID;
        }

    case SMC_UNKNOWN:
        console_log_is_unsupported = true;
        return ERR_NOT_SUPPORTED;

    default:
        return ERR_NOT_VALID;
    }
}

static status_t arm_ffa_rx_release_is_implemented(bool* is_implemented) {
    bool is_implemented_val;
    status_t res = arm_ffa_call_features(SMC_FC_FFA_RX_RELEASE,
                                         &is_implemented_val, NULL, NULL);
    if (res != NO_ERROR) {
        TRACEF("Failed to query for feature FFA_RX_RELEASE, err = %d\n", res);
        return res;
    }
    if (is_implemented) {
        *is_implemented = is_implemented_val;
    }
    return NO_ERROR;
}

static status_t arm_ffa_rxtx_map_is_implemented(bool* is_implemented,
                                                size_t* buf_size_log2) {
    ffa_features2_t features2;
    bool is_implemented_val = false;
    status_t res;

    ASSERT(is_implemented);
#if ARCH_ARM64
    res = arm_ffa_call_features(SMC_FC64_FFA_RXTX_MAP, &is_implemented_val,
                                &features2, NULL);
#else
    res = arm_ffa_call_features(SMC_FC_FFA_RXTX_MAP, &is_implemented_val,
                                &features2, NULL);
#endif
    if (res != NO_ERROR) {
        TRACEF("Failed to query for feature FFA_RXTX_MAP, err = %d\n", res);
        return res;
    }
    if (!is_implemented_val) {
        *is_implemented = false;
        return NO_ERROR;
    }
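    /* Decode the RX/TX buffer size granularity reported by FFA_FEATURES */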
    if (buf_size_log2) {
        ulong buf_size_id = features2 & FFA_FEATURES2_RXTX_MAP_BUF_SIZE_MASK;
        switch (buf_size_id) {
        case FFA_FEATURES2_RXTX_MAP_BUF_SIZE_4K:
            *buf_size_log2 = 12;
            break;
        case FFA_FEATURES2_RXTX_MAP_BUF_SIZE_16K:
            *buf_size_log2 = 14;
            break;
        case FFA_FEATURES2_RXTX_MAP_BUF_SIZE_64K:
            *buf_size_log2 = 16;
            break;
        default:
            TRACEF("Unexpected rxtx buffer size identifier: %lx\n",
                   buf_size_id);
            return ERR_NOT_VALID;
        }
    }

    *is_implemented = true;
    return NO_ERROR;
}

static status_t arm_ffa_mem_retrieve_req_is_implemented(
        bool* is_implemented,
        bool* dyn_alloc_supp,
        bool* has_ns_bit,
        size_t* ref_count_num_bits) {
    ffa_features2_t features2;
    ffa_features3_t features3;
    bool is_implemented_val = false;
    status_t res;

    ASSERT(is_implemented);

    res = arm_ffa_call_features(SMC_FC_FFA_MEM_RETRIEVE_REQ,
                                &is_implemented_val, &features2, &features3);
    if (res != NO_ERROR) {
        TRACEF("Failed to query for feature FFA_MEM_RETRIEVE_REQ, err = %d\n",
               res);
        return res;
    }
    if (!is_implemented_val) {
        *is_implemented = false;
        return NO_ERROR;
    }
    if (dyn_alloc_supp) {
        *dyn_alloc_supp = !!(features2 & FFA_FEATURES2_MEM_DYNAMIC_BUFFER);
    }
    if (has_ns_bit) {
        *has_ns_bit = !!(features2 & FFA_FEATURES2_MEM_RETRIEVE_REQ_NS_BIT);
    }
    if (ref_count_num_bits) {
        *ref_count_num_bits =
                (features3 & FFA_FEATURES3_MEM_RETRIEVE_REQ_REFCOUNT_MASK) + 1;
    }
    *is_implemented = true;
    return NO_ERROR;
}

/*
 * Helper function to set up the tx buffer with standard values
 * before calling FFA_MEM_RETRIEVE_REQ.
 */
static void arm_ffa_populate_receive_req_tx_buffer(uint16_t sender_id,
                                                   uint64_t handle,
                                                   uint64_t tag) {
    struct ffa_mtd* req = ffa_tx;
    DEBUG_ASSERT(is_mutex_held(&ffa_rxtx_buffer_lock));

    memset(req, 0, sizeof(struct ffa_mtd));

    req->sender_id = sender_id;
    req->handle = handle;
    /* We must use the same tag as the one used by the sender to retrieve. */
    req->tag = tag;

    /*
     * We only support retrieving memory for ourselves for now.
     * TODO: Also support stream endpoints. Possibly more than one.
     */
    req->emad_count = 1;
    memset(req->emad, 0, sizeof(struct ffa_emad));
    req->emad[0].mapd.endpoint_id = ffa_local_id;
}

/*
 * Call with ffa_rxtx_buffer_lock held and the tx buffer already populated
 * (see arm_ffa_populate_receive_req_tx_buffer). On success, returns the
 * total length of the retrieve response and the length of its first
 * fragment, which is left in the rx buffer.
 */
static status_t arm_ffa_mem_retrieve(uint16_t sender_id,
                                     uint64_t handle,
                                     uint32_t* len,
                                     uint32_t* fragment_len) {
    status_t res = NO_ERROR;

    DEBUG_ASSERT(is_mutex_held(&ffa_rxtx_buffer_lock));
    DEBUG_ASSERT(len);

    uint32_t len_out, fragment_len_out;
    res = arm_ffa_call_mem_retrieve_req(&len_out, &fragment_len_out);
    LTRACEF("total_len: %u, fragment_len: %u\n", len_out, fragment_len_out);
    if (res != NO_ERROR) {
        TRACEF("FF-A memory retrieve request failed, err = %d\n", res);
        return res;
    }
    if (fragment_len_out > len_out) {
        TRACEF("Fragment length larger than total length %u > %u\n",
               fragment_len_out, len_out);
        return ERR_IO;
    }

    /* Check that the first fragment fits in our buffer */
    if (fragment_len_out > ffa_buf_size) {
        TRACEF("Fragment length %u larger than buffer size\n",
               fragment_len_out);
        return ERR_IO;
    }

    if (fragment_len) {
        *fragment_len = fragment_len_out;
    }
    if (len) {
        *len = len_out;
    }

    return NO_ERROR;
}

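/*
 * Return the physical address and size of the address range at "index"
 * within the fragment described by frag_info.
 */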
status_t arm_ffa_mem_address_range_get(struct arm_ffa_mem_frag_info* frag_info,
                                       size_t index,
                                       paddr_t* addr,
                                       size_t* size) {
    uint32_t page_count;
    size_t frag_idx;

    DEBUG_ASSERT(frag_info);

    if (index < frag_info->start_index ||
        index >= frag_info->start_index + frag_info->count) {
        return ERR_OUT_OF_RANGE;
    }

    frag_idx = index - frag_info->start_index;

    page_count = frag_info->address_ranges[frag_idx].page_count;
    LTRACEF("address %p, page_count 0x%x\n",
            (void*)frag_info->address_ranges[frag_idx].address,
            frag_info->address_ranges[frag_idx].page_count);
    if (page_count < 1 || ((size_t)page_count > (SIZE_MAX / FFA_PAGE_SIZE))) {
        TRACEF("bad page count 0x%x at %zu\n", page_count, index);
        return ERR_IO;
    }

    if (addr) {
        *addr = (paddr_t)frag_info->address_ranges[frag_idx].address;
    }
    if (size) {
        *size = page_count * FFA_PAGE_SIZE;
    }

    return NO_ERROR;
}

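/*
 * Start retrieving a shared or lent memory region. On success the RX/TX
 * buffer lock is left held until arm_ffa_rx_release() is called, and
 * frag_info describes the address ranges in the first fragment.
 *
 * Illustrative call sequence (hypothetical caller, error handling omitted):
 *
 *   struct arm_ffa_mem_frag_info frag;
 *   uint32_t range_count;
 *   uint arch_mmu_flags;
 *   paddr_t paddr;
 *   size_t size;
 *
 *   arm_ffa_mem_retrieve_start(sender_id, handle, tag, &range_count,
 *                              &arch_mmu_flags, &frag);
 *   for (size_t i = 0; i < range_count; i++) {
 *       if (i >= frag.start_index + frag.count) {
 *           arm_ffa_rx_release();
 *           arm_ffa_mem_retrieve_next_frag(handle, &frag);
 *       }
 *       arm_ffa_mem_address_range_get(&frag, i, &paddr, &size);
 *       // map [paddr, paddr + size) with arch_mmu_flags
 *   }
 *   arm_ffa_rx_release();
 */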
status_t arm_ffa_mem_retrieve_start(uint16_t sender_id,
                                    uint64_t handle,
                                    uint64_t tag,
                                    uint32_t* address_range_count,
                                    uint* arch_mmu_flags,
                                    struct arm_ffa_mem_frag_info* frag_info) {
    status_t res;
    struct ffa_mtd* mtd;
    struct ffa_emad* emad;
    struct ffa_comp_mrd* comp_mrd;
    uint32_t computed_len;
    uint32_t header_size;

    uint32_t total_len;
    uint32_t fragment_len;

    DEBUG_ASSERT(frag_info);

    mutex_acquire(&ffa_rxtx_buffer_lock);
    arm_ffa_populate_receive_req_tx_buffer(sender_id, handle, tag);
    res = arm_ffa_mem_retrieve(sender_id, handle, &total_len, &fragment_len);

    if (res != NO_ERROR) {
        TRACEF("FF-A memory retrieve failed err=%d\n", res);
        return res;
    }

    if (fragment_len <
        offsetof(struct ffa_mtd, emad) + sizeof(struct ffa_emad)) {
        TRACEF("Fragment too short for memory transaction descriptor\n");
        return ERR_IO;
    }

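    /*
     * The retrieve response is delivered as a memory transaction
     * descriptor in the RX buffer.
     */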
    mtd = ffa_rx;
    emad = mtd->emad;

    /*
     * We don't retrieve the memory on behalf of anyone else, so we only
     * expect one receiver address range descriptor.
     */
    if (mtd->emad_count != 1) {
        TRACEF("unexpected response count %d != 1\n", mtd->emad_count);
        return ERR_IO;
    }

    LTRACEF("comp_mrd_offset: %u\n", emad->comp_mrd_offset);
    if (emad->comp_mrd_offset + sizeof(*comp_mrd) > fragment_len) {
        TRACEF("Fragment length %u too short for comp_mrd_offset %u\n",
               fragment_len, emad->comp_mrd_offset);
        return ERR_IO;
    }

    comp_mrd = ffa_rx + emad->comp_mrd_offset;

    uint32_t address_range_count_out = comp_mrd->address_range_count;
    frag_info->address_ranges = comp_mrd->address_range_array;
    LTRACEF("address_range_count: %u\n", address_range_count_out);

    computed_len = emad->comp_mrd_offset +
                   offsetof(struct ffa_comp_mrd, address_range_array) +
                   sizeof(struct ffa_cons_mrd) * comp_mrd->address_range_count;
    if (total_len != computed_len) {
        TRACEF("Reported length %u != computed length %u\n", total_len,
               computed_len);
        return ERR_IO;
    }

    header_size = emad->comp_mrd_offset +
                  offsetof(struct ffa_comp_mrd, address_range_array);
    frag_info->count =
            (fragment_len - header_size) / sizeof(struct ffa_cons_mrd);
    LTRACEF("Descriptors in fragment %u\n", frag_info->count);

    if (frag_info->count * sizeof(struct ffa_cons_mrd) + header_size !=
        fragment_len) {
        TRACEF("fragment length %u, contains partial descriptor\n",
               fragment_len);
        return ERR_IO;
    }

    frag_info->received_len = fragment_len;
    frag_info->start_index = 0;

    uint arch_mmu_flags_out = 0;

    switch (mtd->flags & FFA_MTD_FLAG_TYPE_MASK) {
    case FFA_MTD_FLAG_TYPE_SHARE_MEMORY:
        /*
         * If memory is shared, assume it is not safe to execute out of. This
         * specifically indicates that another party may have access to the
         * memory.
         */
        arch_mmu_flags_out |= ARCH_MMU_FLAG_PERM_NO_EXECUTE;
        break;
    case FFA_MTD_FLAG_TYPE_LEND_MEMORY:
        break;
    case FFA_MTD_FLAG_TYPE_DONATE_MEMORY:
        TRACEF("Donate memory transaction type is not supported\n");
        return ERR_NOT_IMPLEMENTED;
    default:
        TRACEF("Unknown memory transaction type: 0x%x\n", mtd->flags);
        return ERR_NOT_VALID;
    }

    switch (mtd->memory_region_attributes & ~FFA_MEM_ATTR_NONSECURE) {
    case FFA_MEM_ATTR_DEVICE_NGNRE:
        arch_mmu_flags_out |= ARCH_MMU_FLAG_UNCACHED_DEVICE;
        break;
    case FFA_MEM_ATTR_NORMAL_MEMORY_UNCACHED:
        arch_mmu_flags_out |= ARCH_MMU_FLAG_UNCACHED;
        break;
    case (FFA_MEM_ATTR_NORMAL_MEMORY_CACHED_WB | FFA_MEM_ATTR_INNER_SHAREABLE):
        arch_mmu_flags_out |= ARCH_MMU_FLAG_CACHED;
        break;
    default:
        TRACEF("Invalid memory attributes, 0x%x\n",
               mtd->memory_region_attributes);
        return ERR_NOT_VALID;
    }

    if (!(emad->mapd.memory_access_permissions & FFA_MEM_PERM_RW)) {
        arch_mmu_flags_out |= ARCH_MMU_FLAG_PERM_RO;
    }
    if (emad->mapd.memory_access_permissions & FFA_MEM_PERM_NX) {
        /*
         * Don't allow executable mappings if the stage 2 page tables don't
         * allow it. The hardware allows the stage 2 NX bit to only apply to
         * EL1, not EL0, but neither FF-A nor LK can currently express this, so
         * disallow both if FFA_MEM_PERM_NX is set.
         */
        arch_mmu_flags_out |= ARCH_MMU_FLAG_PERM_NO_EXECUTE;
    }

    if (!supports_ns_bit ||
        (mtd->memory_region_attributes & FFA_MEM_ATTR_NONSECURE)) {
        arch_mmu_flags_out |= ARCH_MMU_FLAG_NS;
        /* Regardless of origin, we don't want to execute out of NS memory. */
        arch_mmu_flags_out |= ARCH_MMU_FLAG_PERM_NO_EXECUTE;
    }

    if (arch_mmu_flags) {
        *arch_mmu_flags = arch_mmu_flags_out;
    }
    if (address_range_count) {
        *address_range_count = address_range_count_out;
    }

    return res;
}

/*
 * This assumes that the fragment is completely composed of memory
 * region descriptors (struct ffa_cons_mrd).
 */
status_t arm_ffa_mem_retrieve_next_frag(
        uint64_t handle,
        struct arm_ffa_mem_frag_info* frag_info) {
    status_t res;
    uint32_t fragment_len;

    mutex_acquire(&ffa_rxtx_buffer_lock);

    res = arm_ffa_call_mem_frag_rx(handle, frag_info->received_len,
                                   &fragment_len);

    if (res != NO_ERROR) {
        TRACEF("Failed to get memory retrieve fragment, err = %d\n", res);
        return res;
    }

    frag_info->received_len += fragment_len;
    frag_info->start_index += frag_info->count;

    frag_info->count = fragment_len / sizeof(struct ffa_cons_mrd);
    if (frag_info->count * sizeof(struct ffa_cons_mrd) != fragment_len) {
        TRACEF("fragment length %u, contains partial descriptor\n",
               fragment_len);
        return ERR_IO;
    }

    frag_info->address_ranges = ffa_rx;

    return NO_ERROR;
}

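/*
 * Release the RX buffer back to the SPMC (when FFA_RX_RELEASE is
 * supported) and drop the RX/TX buffer lock taken by
 * arm_ffa_mem_retrieve_start() or arm_ffa_mem_retrieve_next_frag().
 */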
status_t arm_ffa_rx_release(void) {
    status_t res;
    ASSERT(is_mutex_held(&ffa_rxtx_buffer_lock));

    if (!supports_rx_release) {
        res = NO_ERROR;
    } else {
        res = arm_ffa_call_rx_release();
    }

    mutex_release(&ffa_rxtx_buffer_lock);

    if (res == ERR_NOT_SUPPORTED) {
        TRACEF("Tried to release rx buffer when the operation is not supported!\n");
    } else if (res != NO_ERROR) {
        TRACEF("Failed to release rx buffer, err = %d\n", res);
        return res;
    }
    return NO_ERROR;
}

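/* Relinquish access to a retrieved memory region for this partition only. */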
status_t arm_ffa_mem_relinquish(uint64_t handle) {
    status_t res;

    /* Flags are set to 0, so no request to zero the memory is made */
    res = arm_ffa_call_mem_relinquish(handle, 0, 1, &ffa_local_id);
    if (res != NO_ERROR) {
        TRACEF("Failed to relinquish memory region, err = %d\n", res);
    }

    return res;
}

static status_t arm_ffa_setup(void) {
    status_t res;
    uint16_t ver_major_ret;
    uint16_t ver_minor_ret;
    bool is_implemented;
    size_t buf_size_log2;
    size_t ref_count_num_bits;
    size_t arch_page_count;
    size_t ffa_page_count;
    size_t count;
    paddr_t tx_paddr;
    paddr_t rx_paddr;
    void* tx_vaddr;
    void* rx_vaddr;
    struct list_node page_list = LIST_INITIAL_VALUE(page_list);

    res = arm_ffa_call_version(FFA_CURRENT_VERSION_MAJOR,
                               FFA_CURRENT_VERSION_MINOR, &ver_major_ret,
                               &ver_minor_ret);
    if (res != NO_ERROR) {
        TRACEF("No compatible FF-A version found\n");
        return res;
    } else if (FFA_CURRENT_VERSION_MAJOR != ver_major_ret ||
               FFA_CURRENT_VERSION_MINOR > ver_minor_ret) {
        /* Once Trusty supports more FF-A versions, downgrading may be possible */
        TRACEF("Incompatible FF-A interface version, %" PRIu16 ".%" PRIu16 "\n",
               ver_major_ret, ver_minor_ret);
        return ERR_NOT_SUPPORTED;
    }

    res = arm_ffa_call_id_get(&ffa_local_id);
    if (res != NO_ERROR) {
        TRACEF("Failed to get FF-A partition id (err=%d)\n", res);
        return res;
    }

    res = arm_ffa_rx_release_is_implemented(&is_implemented);
    if (res != NO_ERROR) {
        TRACEF("Error checking if FFA_RX_RELEASE is implemented (err=%d)\n",
               res);
        return res;
    }
    if (is_implemented) {
        supports_rx_release = true;
    } else {
        LTRACEF("FFA_RX_RELEASE is not implemented\n");
    }

    res = arm_ffa_rxtx_map_is_implemented(&is_implemented, &buf_size_log2);
    if (res != NO_ERROR) {
        TRACEF("Error checking if FFA_RXTX_MAP is implemented (err=%d)\n", res);
        return res;
    }
    if (!is_implemented) {
        TRACEF("FFA_RXTX_MAP is not implemented\n");
        return ERR_NOT_SUPPORTED;
    }

    res = arm_ffa_mem_retrieve_req_is_implemented(
            &is_implemented, NULL, &supports_ns_bit, &ref_count_num_bits);
    if (res != NO_ERROR) {
        TRACEF("Error checking if FFA_MEM_RETRIEVE_REQ is implemented (err=%d)\n",
               res);
        return res;
    }
    if (!is_implemented) {
        TRACEF("FFA_MEM_RETRIEVE_REQ is not implemented\n");
        return ERR_NOT_SUPPORTED;
    }

    if (ref_count_num_bits < 64) {
        /*
         * Expect 64 bit reference count. If we don't have it, future calls to
         * SMC_FC_FFA_MEM_RETRIEVE_REQ can fail if we receive the same handle
         * multiple times. Warn about this, but don't return an error as we only
         * receive each handle once in the typical case.
         */
        TRACEF("Warning FFA_MEM_RETRIEVE_REQ does not have 64 bit reference count (%zu)\n",
               ref_count_num_bits);
    }

    ffa_buf_size = 1U << buf_size_log2;
    ASSERT((ffa_buf_size % FFA_PAGE_SIZE) == 0);

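    /*
     * The FF-A buffer size may differ from the arch page size: compute the
     * allocation size in arch pages and the registration size in FF-A pages.
     */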
    arch_page_count = DIV_ROUND_UP(ffa_buf_size, PAGE_SIZE);
    ffa_page_count = ffa_buf_size / FFA_PAGE_SIZE;
    count = pmm_alloc_contiguous(arch_page_count, buf_size_log2, &tx_paddr,
                                 &page_list);
    if (count != arch_page_count) {
        TRACEF("Failed to allocate tx buffer %zx!=%zx\n", count,
               arch_page_count);
        res = ERR_NO_MEMORY;
        goto err_alloc_tx;
    }
    tx_vaddr = paddr_to_kvaddr(tx_paddr);
    ASSERT(tx_vaddr);

    count = pmm_alloc_contiguous(arch_page_count, buf_size_log2, &rx_paddr,
                                 &page_list);
    if (count != arch_page_count) {
        TRACEF("Failed to allocate rx buffer %zx!=%zx\n", count,
               arch_page_count);
        res = ERR_NO_MEMORY;
        goto err_alloc_rx;
    }
    rx_vaddr = paddr_to_kvaddr(rx_paddr);
    ASSERT(rx_vaddr);

    res = arm_ffa_call_rxtx_map(tx_paddr, rx_paddr, ffa_page_count);
    if (res != NO_ERROR) {
        TRACEF("Failed to map tx @ %p, rx @ %p, page count 0x%zx (err=%d)\n",
               (void*)tx_paddr, (void*)rx_paddr, ffa_page_count, res);
        goto err_rxtx_map;
    }

    ffa_tx = tx_vaddr;
    ffa_rx = rx_vaddr;

    return res;

err_rxtx_map:
err_alloc_rx:
    pmm_free(&page_list);
err_alloc_tx:
    /* pmm_alloc_contiguous leaves the page list unchanged on failure */

    return res;
}

static void arm_ffa_init(uint level) {
    status_t res;

    res = arm_ffa_setup();

    if (res == NO_ERROR) {
        arm_ffa_init_is_success = true;

#if WITH_SMP
        res = ffa_call_secondary_ep_register();
        if (res == ERR_NOT_SUPPORTED) {
            LTRACEF("FFA_SECONDARY_EP_REGISTER is not supported\n");
        } else if (res != NO_ERROR) {
            TRACEF("Failed to register secondary core entry point (err=%d)\n",
                   res);
        }
#endif
    } else {
        TRACEF("Failed to initialize FF-A (err=%d)\n", res);
    }
}

LK_INIT_HOOK(arm_ffa_init, arm_ffa_init, LK_INIT_LEVEL_PLATFORM - 2);