/*
 * Copyright (c) 2020, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <common/debug.h>
#include <common/runtime_svc.h>
#include <errno.h>
#include <inttypes.h>
#include <lib/object_pool.h>
#include <lib/spinlock.h>
#include <lib/xlat_tables/xlat_tables_v2.h>
#include <platform_def.h>
#include <trusty/arm_ffa.h>
#include <trusty/ffa_helpers.h>
#include <trusty/plat/shared_mem.h>

/*
 * Use a 512KB buffer by default for shared memory descriptors. Set
 * TRUSTY_SHARED_MEMORY_OBJ_SIZE in platform_def.h to use a different value.
 */
#ifndef TRUSTY_SHARED_MEMORY_OBJ_SIZE
#define TRUSTY_SHARED_MEMORY_OBJ_SIZE (512 * 1024)
#endif
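
/*
 * For example, a platform that shares many or unusually large memory regions
 * could reserve a bigger descriptor buffer by adding an override to its
 * platform_def.h (illustrative value, not defined in this file):
 *
 *   #define TRUSTY_SHARED_MEMORY_OBJ_SIZE (1024 * 1024)
 */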

#pragma weak plat_mem_set_shared
int plat_mem_set_shared(struct ffa_mtd *mtd, bool shared)
{
        /*
         * The platform should check that the memory is nonsecure; however, a
         * default implementation is provided here without that check for
         * backward compatibility.
         */
        if (trusty_ffa_should_be_secure(mtd)) {
                return -EINVAL;
        }
        return 0;
}

/**
 * struct trusty_shmem_obj - Shared memory object.
 * @desc_size:      Size of @desc.
 * @desc_filled:    Size of @desc already received.
 * @in_use:         Number of clients that have called ffa_mem_retrieve_req
 *                  without a matching ffa_mem_relinquish call.
 * @desc:           FF-A memory region descriptor passed in ffa_mem_share.
 */
struct trusty_shmem_obj {
        size_t desc_size;
        size_t desc_filled;
        size_t in_use;
        struct ffa_mtd desc;
};

/**
 * struct trusty_shmem_obj_state - Global state.
 * @data:           Backing store for trusty_shmem_obj objects.
 * @allocated:      Number of bytes allocated in @data.
 * @next_handle:    Handle used for next allocated object.
 * @lock:           Lock protecting all state in this file.
 */
struct trusty_shmem_obj_state {
        uint8_t *data;
        size_t allocated;
        uint64_t next_handle;
        struct spinlock lock;
};

/**
 * struct trusty_shmem_client_state - Per client state.
 * @tx_buf:             Client's transmit buffer.
 * @rx_buf:             Client's receive buffer.
 * @buf_size:           Size of @tx_buf and @rx_buf.
 * @secure:             If %true, the client is the secure os.
 * @identity_mapped:    If %true, all client memory is identity mapped.
 * @receiver:           If %true, the client is allowed to receive memory.
 *                      If %false, the client is allowed to send memory.
 * @use_ns_bit:         If %true, the client expects the NS bit in the
 *                      memory_region_attributes of the ffa_mtd to be set for
 *                      nonsecure memory.
 *                      If %false, the client does not support the NS bit
 *                      in the memory_region_attributes of the ffa_mtd.
 */
struct trusty_shmem_client_state {
        const void *tx_buf;
        void *rx_buf;
        size_t buf_size;
        const bool secure;
        const bool identity_mapped;
        const bool receiver;
        bool use_ns_bit;
};

/*
 * This is given its own name so that it can be pulled out of .bss by
 * a linker script and put in a different section if desired.
 */
__section(".bss.trusty.shmem.objs_data")
__aligned(8) static uint8_t
trusty_shmem_objs_data[TRUSTY_SHARED_MEMORY_OBJ_SIZE];
static struct trusty_shmem_obj_state trusty_shmem_obj_state = {
        /* initializing data this way keeps the bulk of the state in .bss */
        .data = trusty_shmem_objs_data,
        /* Set start value for handle so top 32 bits are needed quickly */
        .next_handle = 0xffffffc0,
};

typedef struct trusty_sp_desc {
        uint16_t sp_id;
        const uuid_t *uuid;
        uint32_t ffa_version;
        uint32_t properties;
        struct trusty_shmem_client_state *client;
} trusty_sp_desc_t;

static trusty_sp_desc_t trusty_sp;

typedef struct trusty_ns_client_desc {
        uint16_t ep_id;
        struct trusty_shmem_client_state *client;
} trusty_ns_client_desc_t;

static trusty_ns_client_desc_t trusty_ns_client;

static struct trusty_shmem_client_state trusty_shmem_client_state[2] = {
        [true].secure = true,
        [true].identity_mapped = true,
        [true].receiver = true,
};
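
/*
 * The array above is indexed by a bool: [false] (index 0) holds the
 * non-secure client state and [true] (index 1) holds the secure-os state.
 * Illustrative lookup, equivalent to the dispatch done via trusty_sp.client
 * and trusty_ns_client.client in the SMC handler below:
 *
 *   struct trusty_shmem_client_state *client =
 *           &trusty_shmem_client_state[is_caller_secure(flags)];
 */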

/**
 * trusty_shmem_obj_size - Convert from descriptor size to object size.
 * @desc_size:  Size of struct ffa_memory_region_descriptor object.
 *
 * Return: Size of struct trusty_shmem_obj object.
 */
static size_t trusty_shmem_obj_size(size_t desc_size)
{
        return desc_size + offsetof(struct trusty_shmem_obj, desc);
}
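
/*
 * Illustrative arithmetic: an object holding a 0x30-byte descriptor occupies
 * trusty_shmem_obj_size(0x30) bytes of the backing store, i.e. the descriptor
 * size plus the object header fields that precede the embedded @desc.
 */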

/**
 * trusty_shmem_obj_alloc - Allocate struct trusty_shmem_obj.
 * @state:      Global state.
 * @desc_size:  Size of struct ffa_memory_region_descriptor object that
 *              allocated object will hold.
 *
 * Return: Pointer to newly allocated object, or %NULL if there is not enough
 *         space left. The returned pointer is only valid while @state is
 *         locked; to use it again after unlocking @state,
 *         trusty_shmem_obj_lookup must be called.
 */
static struct trusty_shmem_obj *
trusty_shmem_obj_alloc(struct trusty_shmem_obj_state *state, size_t desc_size)
{
        struct trusty_shmem_obj *obj;
        size_t free = sizeof(trusty_shmem_objs_data) - state->allocated;
        if (trusty_shmem_obj_size(desc_size) > free) {
                NOTICE("%s(0x%zx) failed, free 0x%zx\n",
                       __func__, desc_size, free);
                return NULL;
        }
        obj = (struct trusty_shmem_obj *)(state->data + state->allocated);
        obj->desc_size = desc_size;
        obj->desc_filled = 0;
        obj->in_use = 0;
        state->allocated += trusty_shmem_obj_size(desc_size);
        return obj;
}

/**
 * trusty_shmem_obj_free - Free struct trusty_shmem_obj.
 * @state:      Global state.
 * @obj:        Object to free.
 *
 * Release memory used by @obj. Other objects may move, so on return all
 * pointers to struct trusty_shmem_obj object should be considered invalid, not
 * just @obj.
 *
 * The current implementation always compacts the remaining objects to simplify
 * the allocator and to avoid fragmentation.
 */
static void trusty_shmem_obj_free(struct trusty_shmem_obj_state *state,
                                  struct trusty_shmem_obj *obj)
{
        size_t free_size = trusty_shmem_obj_size(obj->desc_size);
        uint8_t *shift_dest = (uint8_t *)obj;
        uint8_t *shift_src = shift_dest + free_size;
        size_t shift_size = state->allocated - (shift_src - state->data);
        if (shift_size) {
                memmove(shift_dest, shift_src, shift_size);
        }
        state->allocated -= free_size;
}

/**
 * trusty_shmem_obj_lookup - Lookup struct trusty_shmem_obj by handle.
 * @state:      Global state.
 * @handle:     Unique handle of object to return.
 *
 * Return: struct trusty_shmem_obj object with handle matching @handle.
 *         %NULL, if no object in @state->data has a matching handle.
 */
static struct trusty_shmem_obj *
trusty_shmem_obj_lookup(struct trusty_shmem_obj_state *state, uint64_t handle)
{
        uint8_t *curr = state->data;
        while (curr - state->data < state->allocated) {
                struct trusty_shmem_obj *obj = (struct trusty_shmem_obj *)curr;
                if (obj->desc.handle == handle) {
                        return obj;
                }
                curr += trusty_shmem_obj_size(obj->desc_size);
        }
        return NULL;
}
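
/*
 * Illustrative usage pattern (not a helper defined in this file): both the
 * lookup and the returned pointer are only valid while the global lock is
 * held, since trusty_shmem_obj_free() may move objects when it compacts.
 *
 *   spin_lock(&trusty_shmem_obj_state.lock);
 *   obj = trusty_shmem_obj_lookup(&trusty_shmem_obj_state, handle);
 *   if (obj) {
 *           ... use obj ...
 *   }
 *   spin_unlock(&trusty_shmem_obj_state.lock);
 */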

static struct ffa_comp_mrd *
trusty_shmem_obj_get_comp_mrd(struct trusty_shmem_obj *obj)
{
        return trusty_ffa_mtd_get_comp_mrd(&obj->desc);
}

/**
 * trusty_shmem_obj_ffa_constituent_size - Calculate variable size part of obj.
 * @obj:        Object containing ffa_memory_region_descriptor.
 *
 * Return: Size of ffa_constituent_memory_region_descriptors in @obj.
 */
static size_t
trusty_shmem_obj_ffa_constituent_size(struct trusty_shmem_obj *obj)
{
        return trusty_shmem_obj_get_comp_mrd(obj)->address_range_count *
               sizeof(struct ffa_cons_mrd);
}

/**
 * trusty_shmem_check_obj - Check that counts in descriptor match overall size.
 * @obj:        Object containing ffa_memory_region_descriptor.
 *
 * Return: 0 if object is valid, -EINVAL if memory region attributes count is
 * not 1, -EINVAL if constituent_memory_region_descriptor offset or count is
 * invalid.
 */
static int trusty_shmem_check_obj(struct trusty_shmem_obj *obj)
{
        if (obj->desc.emad_count != 1) {
                NOTICE("%s: unsupported attribute desc count %u != 1\n",
                       __func__, obj->desc.emad_count);
                return -EINVAL;
        }

        uint32_t offset = obj->desc.emad[0].comp_mrd_offset;
        size_t header_emad_size = sizeof(obj->desc) +
                obj->desc.emad_count * sizeof(obj->desc.emad[0]);

        if (offset < header_emad_size) {
                NOTICE("%s: invalid object, offset %u < header + emad %zu\n",
                       __func__, offset, header_emad_size);
                return -EINVAL;
        }

        size_t size = obj->desc_size;
        if (offset > size) {
                NOTICE("%s: invalid object, offset %u > total size %zu\n",
                       __func__, offset, obj->desc_size);
                return -EINVAL;
        }
        size -= offset;

        if (size < sizeof(struct ffa_comp_mrd)) {
                NOTICE("%s: invalid object, offset %u, total size %zu, no space for header\n",
                       __func__, offset, obj->desc_size);
                return -EINVAL;
        }
        size -= sizeof(struct ffa_comp_mrd);

        size_t count = size / sizeof(struct ffa_cons_mrd);

        struct ffa_comp_mrd *comp = trusty_shmem_obj_get_comp_mrd(obj);

        if (comp->address_range_count != count) {
                NOTICE("%s: invalid object, desc count %u != %zu\n",
                       __func__, comp->address_range_count, count);
                return -EINVAL;
        }

        size_t expected_size = offset + sizeof(*comp) +
                trusty_shmem_obj_ffa_constituent_size(obj);
        if (expected_size != obj->desc_size) {
                NOTICE("%s: invalid object, computed size %zu != size %zu\n",
                       __func__, expected_size, obj->desc_size);
                return -EINVAL;
        }

        if (obj->desc_filled < obj->desc_size) {
                /*
                 * The whole descriptor has not yet been received. Skip final
                 * checks.
                 */
                return 0;
        }

        size_t total_page_count = 0;
        for (size_t i = 0; i < count; i++) {
                total_page_count +=
                        comp->address_range_array[i].page_count;
        }
        if (comp->total_page_count != total_page_count) {
                NOTICE("%s: invalid object, desc total_page_count %u != %zu\n",
                       __func__, comp->total_page_count,
                       total_page_count);
                return -EINVAL;
        }

        return 0;
}
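
/*
 * Worked example (illustrative): a complete descriptor carrying a single
 * constituent range must satisfy
 *
 *   desc_size == comp_mrd_offset + sizeof(struct ffa_comp_mrd)
 *                + 1 * sizeof(struct ffa_cons_mrd)
 *
 * with comp_mrd_offset >= sizeof(struct ffa_mtd) + sizeof(emad[0]), and the
 * single range's page_count equal to comp->total_page_count.
 */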

static long trusty_ffa_fill_desc(struct trusty_shmem_client_state *client,
                                 struct trusty_shmem_obj *obj,
                                 uint32_t fragment_length,
                                 ffa_mtd_flag32_t mtd_flags,
                                 void *smc_handle)
{
        int ret;

        if (!client->buf_size) {
                NOTICE("%s: buffer pair not registered\n", __func__);
                ret = -EINVAL;
                goto err_arg;
        }

        if (fragment_length > client->buf_size) {
                NOTICE("%s: bad fragment size %u > %zu buffer size\n", __func__,
                       fragment_length, client->buf_size);
                ret = -EINVAL;
                goto err_arg;
        }

        if (fragment_length > obj->desc_size - obj->desc_filled) {
                NOTICE("%s: bad fragment size %u > %zu remaining\n", __func__,
                       fragment_length, obj->desc_size - obj->desc_filled);
                ret = -EINVAL;
                goto err_arg;
        }

        memcpy((uint8_t *)&obj->desc + obj->desc_filled, client->tx_buf,
               fragment_length);

        if (!obj->desc_filled) {
                /* First fragment, descriptor header has been copied */
                obj->desc.handle = trusty_shmem_obj_state.next_handle++;
                obj->desc.flags = mtd_flags;
                obj->desc.memory_region_attributes |= FFA_MEM_ATTR_NONSECURE;
        }

        obj->desc_filled += fragment_length;

        ret = trusty_shmem_check_obj(obj);
        if (ret) {
                goto err_bad_desc;
        }

        uint32_t handle_low = (uint32_t)obj->desc.handle;
        uint32_t handle_high = obj->desc.handle >> 32;
        if (obj->desc_filled != obj->desc_size) {
                SMC_RET8(smc_handle, FFA_MEM_FRAG_RX, handle_low,
                         handle_high, obj->desc_filled,
                         (uint32_t)obj->desc.sender_id << 16, 0, 0, 0);
        }

        ret = plat_mem_set_shared(&obj->desc, true);
        if (ret) {
                goto err_share_fail;
        }

        SMC_RET8(smc_handle, FFA_SUCCESS_SMC32, 0, handle_low, handle_high, 0,
                 0, 0, 0);

err_share_fail:
err_bad_desc:
err_arg:
        trusty_shmem_obj_free(&trusty_shmem_obj_state, obj);
        return ret;
}

/**
 * trusty_ffa_mem_share - FFA_MEM_SHARE implementation.
 * @client:             Client state.
 * @total_length:       Total length of shared memory descriptor.
 * @fragment_length:    Length of fragment of shared memory descriptor passed
 *                      in this call.
 * @address:            Not supported, must be 0.
 * @page_count:         Not supported, must be 0.
 * @mtd_flags:          Type of sharing (share or lend) recorded in the
 *                      descriptor flags.
 * @smc_handle:         Handle passed to smc call. Used to return
 *                      FFA_MEM_FRAG_RX or FFA_SUCCESS_SMC32.
 *
 * Implements a subset of the FF-A FFA_MEM_SHARE call needed to share memory
 * from non-secure os to secure os (with no stream endpoints).
 *
 * Return: 0 on success, error code on failure.
 */
static long trusty_ffa_mem_share(struct trusty_shmem_client_state *client,
                                 uint32_t total_length,
                                 uint32_t fragment_length,
                                 uint64_t address,
                                 uint32_t page_count,
                                 ffa_mtd_flag32_t mtd_flags,
                                 void *smc_handle)
{
        struct trusty_shmem_obj *obj;

        if (address || page_count) {
                NOTICE("%s: custom memory region for message not supported\n",
                       __func__);
                return -EINVAL;
        }

        if (client->receiver) {
                NOTICE("%s: unsupported share direction\n", __func__);
                return -EINVAL;
        }

        if (fragment_length < sizeof(obj->desc)) {
                NOTICE("%s: bad first fragment size %u < %zu\n",
                       __func__, fragment_length, sizeof(obj->desc));
                return -EINVAL;
        }
        obj = trusty_shmem_obj_alloc(&trusty_shmem_obj_state, total_length);
        if (!obj) {
                return -ENOMEM;
        }

        return trusty_ffa_fill_desc(client, obj, fragment_length, mtd_flags,
                                    smc_handle);
}
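
/*
 * Example call sequence (illustrative; smc8() stands in for whatever SMC
 * helper the non-secure caller uses and is not defined in this file):
 *
 *   // Descriptor of desc_len bytes already written to the caller's TX buf.
 *   ret = smc8(FFA_MEM_SHARE_SMC64, desc_len, desc_len, 0, 0, 0, 0, 0);
 *   // Single fragment: on success w0 == FFA_SUCCESS_SMC32 and the 64-bit
 *   // handle is returned in w2 (low) and w3 (high).
 */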

/**
 * trusty_ffa_mem_frag_tx - FFA_MEM_FRAG_TX implementation.
 * @client:             Client state.
 * @handle_low:         Handle_low value returned from FFA_MEM_FRAG_RX.
 * @handle_high:        Handle_high value returned from FFA_MEM_FRAG_RX.
 * @fragment_length:    Length of fragments transmitted.
 * @sender_id:          Vmid of sender in bits [31:16].
 * @smc_handle:         Handle passed to smc call. Used to return
 *                      FFA_MEM_FRAG_RX or FFA_SUCCESS_SMC32.
 *
 * Return: @smc_handle on success, error code on failure.
 */
static long trusty_ffa_mem_frag_tx(struct trusty_shmem_client_state *client,
                                   uint32_t handle_low,
                                   uint32_t handle_high,
                                   uint32_t fragment_length,
                                   uint32_t sender_id,
                                   void *smc_handle)
{
        struct trusty_shmem_obj *obj;
        uint64_t handle = handle_low | (((uint64_t)handle_high) << 32);

        if (client->receiver) {
                NOTICE("%s: unsupported share direction\n", __func__);
                return -EINVAL;
        }

        obj = trusty_shmem_obj_lookup(&trusty_shmem_obj_state, handle);
        if (!obj) {
                NOTICE("%s: invalid handle, 0x%" PRIx64
                       ", not a valid handle\n", __func__, handle);
                return -ENOENT;
        }

        if (sender_id != (uint32_t)obj->desc.sender_id << 16) {
                NOTICE("%s: invalid sender_id 0x%x != 0x%x\n", __func__,
                       sender_id, (uint32_t)obj->desc.sender_id << 16);
                return -ENOENT;
        }

        if (obj->desc_filled == obj->desc_size) {
                NOTICE("%s: object desc already filled, %zu\n", __func__,
                       obj->desc_filled);
                return -EINVAL;
        }

        return trusty_ffa_fill_desc(client, obj, fragment_length, 0,
                                    smc_handle);
}
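
/*
 * Illustrative multi-fragment send flow, for a descriptor larger than the
 * sender's TX buffer (FF-A ABI names only, no helpers from this file):
 *
 *   FFA_MEM_SHARE(total_len, frag0_len)  -> FFA_MEM_FRAG_RX(handle, filled)
 *   // copy next fragment into the TX buffer
 *   FFA_MEM_FRAG_TX(handle, frag1_len)   -> FFA_MEM_FRAG_RX(handle, filled')
 *   ...
 *   FFA_MEM_FRAG_TX(handle, last_len)    -> FFA_SUCCESS(handle)
 */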

/**
 * trusty_ffa_mem_retrieve_req - FFA_MEM_RETRIEVE_REQ implementation.
 * @client:             Client state.
 * @total_length:       Total length of retrieve request descriptor if this is
 *                      the first call. Otherwise (unsupported) must be 0.
 * @fragment_length:    Length of fragment of retrieve request descriptor
 *                      passed in this call. Only
 *                      @fragment_length == @total_length is supported by this
 *                      implementation.
 * @address:            Not supported, must be 0.
 * @page_count:         Not supported, must be 0.
 * @smc_handle:         Handle passed to smc call. Used to return
 *                      FFA_MEM_RETRIEVE_RESP.
 *
 * Implements a subset of the FF-A FFA_MEM_RETRIEVE_REQ call.
 * Used by secure os to retrieve memory already shared by non-secure os.
 * If the data does not fit in a single FFA_MEM_RETRIEVE_RESP message,
 * the client must call FFA_MEM_FRAG_RX until the full response has been
 * received.
 *
 * Return: @smc_handle on success, error code on failure.
 */
static long
trusty_ffa_mem_retrieve_req(struct trusty_shmem_client_state *client,
                            uint32_t total_length,
                            uint32_t fragment_length,
                            uint64_t address,
                            uint32_t page_count,
                            void *smc_handle)
{
        struct trusty_shmem_obj *obj = NULL;
        const struct ffa_mtd *req = client->tx_buf;
        struct ffa_mtd *resp = client->rx_buf;

        if (!client->buf_size) {
                NOTICE("%s: buffer pair not registered\n", __func__);
                return -EINVAL;
        }

        if (address || page_count) {
                NOTICE("%s: custom memory region not supported\n", __func__);
                return -EINVAL;
        }

        if (fragment_length != total_length) {
                NOTICE("%s: fragmented retrieve request not supported\n",
                       __func__);
                return -EINVAL;
        }

        /* req->emad_count is not set for retrieve by hypervisor */
        if (client->receiver && req->emad_count != 1) {
                NOTICE("%s: unsupported retrieve descriptor count: %u\n",
                       __func__, req->emad_count);
                return -EINVAL;
        }

        if (total_length < sizeof(*req)) {
                NOTICE("%s: invalid length %u < %zu\n", __func__, total_length,
                       sizeof(*req));
                return -EINVAL;
        }

        obj = trusty_shmem_obj_lookup(&trusty_shmem_obj_state, req->handle);
        if (!obj) {
                return -ENOENT;
        }

        if (obj->desc_filled != obj->desc_size) {
                NOTICE("%s: incomplete object desc filled %zu < size %zu\n",
                       __func__, obj->desc_filled, obj->desc_size);
                return -EINVAL;
        }

        if (req->emad_count && req->sender_id != obj->desc.sender_id) {
                NOTICE("%s: wrong sender id 0x%x != 0x%x\n",
                       __func__, req->sender_id, obj->desc.sender_id);
                return -EINVAL;
        }

        if (req->emad_count && req->tag != obj->desc.tag) {
                NOTICE("%s: wrong tag 0x%" PRIx64 " != 0x%" PRIx64 "\n",
                       __func__, req->tag, obj->desc.tag);
                return -EINVAL;
        }

        if (req->flags != 0 && req->flags != obj->desc.flags) {
                /*
                 * Current implementation does not support donate, and it
                 * supports no other flags. obj->desc.flags will be
                 * FFA_MTD_FLAG_TYPE_SHARE_MEMORY or
                 * FFA_MTD_FLAG_TYPE_LEND_MEMORY.
                 */
                NOTICE("%s: invalid flags 0x%x\n", __func__, req->flags);
                return -EINVAL;
        }

        /* TODO: support more than one endpoint id */
        if (req->emad_count &&
            req->emad[0].mapd.endpoint_id !=
            obj->desc.emad[0].mapd.endpoint_id) {
                NOTICE("%s: wrong receiver id 0x%x != 0x%x\n",
                       __func__, req->emad[0].mapd.endpoint_id,
                       obj->desc.emad[0].mapd.endpoint_id);
                return -EINVAL;
        }

        if (req->emad_count) {
                obj->in_use++;
        }

        size_t copy_size = MIN(obj->desc_size, client->buf_size);

        memcpy(resp, &obj->desc, copy_size);

        if (!client->use_ns_bit) {
                assert(copy_size >= sizeof(*resp));
                resp->memory_region_attributes &= ~FFA_MEM_ATTR_NONSECURE;
        }

        SMC_RET8(smc_handle, FFA_MEM_RETRIEVE_RESP, obj->desc_size,
                 copy_size, 0, 0, 0, 0, 0);
}
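
/*
 * Illustrative retrieve flow on the receiver side (FF-A ABI names only):
 *
 *   // Retrieve request (handle, sender_id, tag, one emad) in TX buffer.
 *   FFA_MEM_RETRIEVE_REQ(req_len, req_len)
 *           -> FFA_MEM_RETRIEVE_RESP(total_size, copied)
 *   while (copied < total_size)
 *           FFA_MEM_FRAG_RX(handle, copied) -> FFA_MEM_FRAG_TX(handle, n)
 *           copied += n;
 */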

/**
 * trusty_ffa_mem_frag_rx - FFA_MEM_FRAG_RX implementation.
 * @client:             Client state.
 * @handle_low:         Handle passed to &FFA_MEM_RETRIEVE_REQ. Bit[31:0].
 * @handle_high:        Handle passed to &FFA_MEM_RETRIEVE_REQ. Bit[63:32].
 * @fragment_offset:    Byte offset in descriptor to resume at.
 * @sender_id:          Bit[31:16]: Endpoint id of sender if client is a
 *                      hypervisor. 0 otherwise.
 * @smc_handle:         Handle passed to smc call. Used to return
 *                      FFA_MEM_FRAG_TX.
 *
 * Return: @smc_handle on success, error code on failure.
 */
static long trusty_ffa_mem_frag_rx(struct trusty_shmem_client_state *client,
                                   uint32_t handle_low,
                                   uint32_t handle_high,
                                   uint32_t fragment_offset,
                                   uint32_t sender_id,
                                   void *smc_handle)
{
        struct trusty_shmem_obj *obj;
        uint64_t handle = handle_low | (((uint64_t)handle_high) << 32);

        if (!client->buf_size) {
                NOTICE("%s: buffer pair not registered\n", __func__);
                return -EINVAL;
        }

        if (client->secure && sender_id) {
                NOTICE("%s: invalid sender_id 0x%x != 0\n",
                       __func__, sender_id);
                return -EINVAL;
        }

        obj = trusty_shmem_obj_lookup(&trusty_shmem_obj_state, handle);
        if (!obj) {
                NOTICE("%s: invalid handle, 0x%" PRIx64
                       ", not a valid handle\n", __func__, handle);
                return -ENOENT;
        }

        if (!client->secure && sender_id &&
            sender_id != (uint32_t)obj->desc.sender_id << 16) {
                NOTICE("%s: invalid sender_id 0x%x != 0x%x\n", __func__,
                       sender_id, (uint32_t)obj->desc.sender_id << 16);
                return -ENOENT;
        }

        if (fragment_offset >= obj->desc_size) {
                NOTICE("%s: invalid fragment_offset 0x%x >= 0x%zx\n",
                       __func__, fragment_offset, obj->desc_size);
                return -EINVAL;
        }

        size_t full_copy_size = obj->desc_size - fragment_offset;
        size_t copy_size = MIN(full_copy_size, client->buf_size);

        void *src = &obj->desc;

        memcpy(client->rx_buf, src + fragment_offset, copy_size);

        SMC_RET8(smc_handle, FFA_MEM_FRAG_TX, handle_low, handle_high,
                 copy_size, sender_id, 0, 0, 0);
}

/**
 * trusty_ffa_mem_relinquish - FFA_MEM_RELINQUISH implementation.
 * @client:     Client state.
 *
 * Implements a subset of the FF-A FFA_MEM_RELINQUISH call.
 * Used by secure os to release previously shared memory to non-secure os.
 *
 * The handle to release must be in the client's (secure os's) transmit buffer.
 *
 * Return: 0 on success, error code on failure.
 */
static int trusty_ffa_mem_relinquish(struct trusty_shmem_client_state *client)
{
        struct trusty_shmem_obj *obj;
        const struct ffa_mem_relinquish_descriptor *req = client->tx_buf;

        if (!client->buf_size) {
                NOTICE("%s: buffer pair not registered\n", __func__);
                return -EINVAL;
        }

        if (!client->receiver) {
                NOTICE("%s: unsupported share direction\n", __func__);
                return -EINVAL;
        }

        if (req->flags) {
                NOTICE("%s: unsupported flags 0x%x\n", __func__, req->flags);
                return -EINVAL;
        }

        obj = trusty_shmem_obj_lookup(&trusty_shmem_obj_state, req->handle);
        if (!obj) {
                return -ENOENT;
        }

        if (obj->desc.emad_count != req->endpoint_count) {
                return -EINVAL;
        }
        for (size_t i = 0; i < req->endpoint_count; i++) {
                if (req->endpoint_array[i] !=
                    obj->desc.emad[i].mapd.endpoint_id) {
                        return -EINVAL;
                }
        }
        if (!obj->in_use) {
                return -EACCES;
        }
        obj->in_use--;
        return 0;
}
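
/*
 * Illustrative relinquish request, written to the secure os's TX buffer
 * before invoking FFA_MEM_RELINQUISH (field names from
 * struct ffa_mem_relinquish_descriptor as checked above):
 *
 *   req->handle = handle;            // from FFA_MEM_RETRIEVE_RESP
 *   req->flags = 0;                  // no flags supported here
 *   req->endpoint_count = 1;
 *   req->endpoint_array[0] = own_endpoint_id;
 */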

/**
 * trusty_ffa_mem_reclaim - FFA_MEM_RECLAIM implementation.
 * @client:     Client state.
 * @handle_low: Unique handle of shared memory object to reclaim. Bit[31:0].
 * @handle_high: Unique handle of shared memory object to reclaim. Bit[63:32].
 * @flags:      Unsupported, must be 0.
 *
 * Implements a subset of the FF-A FFA_MEM_RECLAIM call.
 * Used by non-secure os to reclaim memory previously shared with secure os.
 *
 * Return: 0 on success, error code on failure.
 */
static int trusty_ffa_mem_reclaim(struct trusty_shmem_client_state *client,
                                  uint32_t handle_low, uint32_t handle_high,
                                  uint32_t flags)
{
        int ret;
        struct trusty_shmem_obj *obj;
        uint64_t handle = handle_low | (((uint64_t)handle_high) << 32);

        if (client->receiver) {
                NOTICE("%s: unsupported share direction\n", __func__);
                return -EINVAL;
        }

        if (flags) {
                NOTICE("%s: unsupported flags 0x%x\n", __func__, flags);
                return -EINVAL;
        }

        obj = trusty_shmem_obj_lookup(&trusty_shmem_obj_state, handle);
        if (!obj) {
                return -ENOENT;
        }
        if (obj->in_use) {
                return -EACCES;
        }

        ret = plat_mem_set_shared(&obj->desc, false);
        if (ret) {
                return ret;
        }

        trusty_shmem_obj_free(&trusty_shmem_obj_state, obj);

        return 0;
}

/**
 * trusty_ffa_rxtx_map - FFA_RXTX_MAP implementation.
 * @client:     Client state.
 * @tx_address: Address of client's transmit buffer.
 * @rx_address: Address of client's receive buffer.
 * @page_count: Number of (contiguous) 4K pages per buffer.
 *
 * Implements the FF-A FFA_RXTX_MAP call.
 * Used by non-secure os and secure os to register their RX/TX buffer pairs.
 *
 * Return: 0 on success, error code on failure.
 */
static long trusty_ffa_rxtx_map(struct trusty_shmem_client_state *client,
                                u_register_t tx_address,
                                u_register_t rx_address,
                                uint32_t page_count)
{
        int ret;
        uintptr_t tx_va;
        uintptr_t rx_va;
        size_t buf_size = page_count * FFA_PAGE_SIZE;

        if (!buf_size) {
                NOTICE("%s: invalid page_count %u\n", __func__, page_count);
                return -EINVAL;
        }

        if (client->buf_size) {
                NOTICE("%s: buffer pair already registered\n", __func__);
                return -EACCES;
        }

        if (client->identity_mapped) {
                tx_va = tx_address;
                rx_va = rx_address;
        } else {
                unsigned int attr = client->secure ? MT_SECURE : MT_NS;
                ret = mmap_add_dynamic_region_alloc_va(tx_address, &tx_va,
                                                       buf_size,
                                                       attr | MT_RO_DATA);
                if (ret) {
                        NOTICE("%s: failed to map tx buffer @ 0x%lx, size 0x%zx\n",
                               __func__, tx_address, buf_size);
                        goto err_map_tx;
                }
                ret = mmap_add_dynamic_region_alloc_va(rx_address, &rx_va,
                                                       buf_size,
                                                       attr | MT_RW_DATA);
                if (ret) {
                        NOTICE("%s: failed to map rx buffer @ 0x%lx, size 0x%zx\n",
                               __func__, rx_address, buf_size);
                        goto err_map_rx;
                }
        }

        client->buf_size = buf_size;
        client->tx_buf = (const void *)tx_va;
        client->rx_buf = (void *)rx_va;

        return 0;

err_map_rx:
        mmap_remove_dynamic_region(tx_va, buf_size);
err_map_tx:
        return ret;
}
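
/*
 * Illustrative registration from an endpoint (ABI name only; the physical
 * addresses and page count are caller-chosen):
 *
 *   FFA_RXTX_MAP_SMC64(tx_pa, rx_pa, 1);   // one 4K page per buffer
 *
 * Note the asymmetry in the mapping above: the TX buffer is mapped read-only
 * (this code only reads descriptors from it) while the RX buffer is mapped
 * read-write so responses can be copied out.
 */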

/**
 * trusty_ffa_rxtx_unmap - FFA_RXTX_UNMAP implementation.
 * @client:     Client state.
 * @id:         Unsupported, ignored.
 *
 * Implements the FF-A FFA_RXTX_UNMAP call.
 * Used by non-secure os and secure os to release their RX/TX buffer pairs.
 *
 * Return: 0 on success, error code on failure.
 */
static long trusty_ffa_rxtx_unmap(struct trusty_shmem_client_state *client,
                                  uint32_t id)
{
        int ret;

        if (!client->buf_size) {
                NOTICE("%s: buffer pair not registered\n", __func__);
                return -EINVAL;
        }

        if (!client->identity_mapped) {
                ret = mmap_remove_dynamic_region((uintptr_t)client->tx_buf,
                                                 client->buf_size);
                if (ret) {
                        NOTICE("%s: failed to unmap tx buffer @ %p, size 0x%zx\n",
                               __func__, client->tx_buf, client->buf_size);
                }
                ret = mmap_remove_dynamic_region((uintptr_t)client->rx_buf,
                                                 client->buf_size);
                if (ret) {
                        NOTICE("%s: failed to unmap rx buffer @ %p, size 0x%zx\n",
                               __func__, client->rx_buf, client->buf_size);
                }
        }
        if (trusty_shmem_obj_state.allocated) {
                WARN("%s: shared memory regions are still active\n", __func__);
        }

        client->buf_size = 0;
        client->tx_buf = NULL;
        client->rx_buf = NULL;
        return 0;
}

/**
 * trusty_ffa_id_get - FFA_ID_GET implementation.
 * @flags:      State flags.
 * @idp:        Pointer to store id return value in.
 *
 * Return the ID of the caller. For the non-secure client, use ID 0 as required
 * by FF-A.
 *
 * Note that the sender_id check in trusty_ffa_mem_frag_tx and
 * trusty_ffa_mem_frag_rx only works when there is no hypervisor because we use
 * id 0. The spec says the sender_id field must be 0 in that case.
 *
 * Return: 0 on success, error code on failure.
 */
static int trusty_ffa_id_get(u_register_t flags, u_register_t *idp)
{
        if (is_caller_secure(flags)) {
                *idp = trusty_sp.sp_id;
        } else {
                *idp = trusty_ns_client.ep_id;
        }

        return 0;
}

/**
 * trusty_ffa_partition_info_get - FFA_PARTITION_INFO_GET implementation.
 * @client:     Client state.
 * @uuid_0:     uuid 0.
 * @uuid_1:     uuid 1.
 * @uuid_2:     uuid 2.
 * @uuid_3:     uuid 3.
 * @ret2:       Pointer to return value2 on success. Contains partition
 *              count in case of UUID match.
 * @ret3:       Pointer to return value3 on success. Contains the size
 *              of each partition descriptor.
 *
 * Return: 0 on success, error code on failure.
 */
static long trusty_ffa_partition_info_get(
        struct trusty_shmem_client_state *client,
        uint32_t uuid_0,
        uint32_t uuid_1,
        uint32_t uuid_2,
        uint32_t uuid_3,
        u_register_t *ret2,
        u_register_t *ret3)
{
        uint32_t uuid[4] = { uuid_0, uuid_1, uuid_2, uuid_3 };

        if (!memcmp(trusty_sp.uuid, uuid, sizeof(uuid)) ||
            (uuid[0] == 0 && uuid[1] == 0 && uuid[2] == 0 && uuid[3] == 0)) {
                struct ffa_partition_info *info;

                info = (struct ffa_partition_info *)client->rx_buf;

                info->id = trusty_sp.sp_id;
                info->execution_ctx_count = PLATFORM_CORE_COUNT;
                info->properties = trusty_sp.properties;

                *ret2 = 1;
                /* Size of each descriptor, not of the pointer to it. */
                *ret3 = sizeof(*info);
                return 0;
        }

        return -ENOENT;
}

/**
 * trusty_ffa_rx_release - FFA_RX_RELEASE implementation.
 * @client:     Client state.
 *
 * Return: 0 on success, error code on failure.
 */
static long trusty_ffa_rx_release(struct trusty_shmem_client_state *client)
{
        /* Trusty SPD doesn't track mailbox state */
        return 0;
}

/**
 * trusty_ffa_version - FFA_VERSION implementation.
 * @client:     Client state.
 * @version_in: Version supported by client.
 * @smc_handle: Handle passed to smc call. Used to return version or error code
 *              directly as this call does not use the FFA_SUCCESS_SMC32 and
 *              FFA_ERROR opcodes that the other calls use.
 *
 * Return: 0 on success, error code on failure.
 */
static long trusty_ffa_version(struct trusty_shmem_client_state *client,
                               uint32_t version_in, void *smc_handle)
{
        if (version_in & (1U << 31)) {
                goto err_not_supported;
        }

        /*
         * We only implement one version. If the client specified a major
         * version at least as new as ours, return the version we support.
         * Otherwise return not-supported.
         */
        if (FFA_VERSION_TO_MAJOR(version_in) >=
            TRUSTY_FFA_CURRENT_VERSION_MAJOR) {
                SMC_RET8(smc_handle, MAKE_TRUSTY_FFA_CURRENT_VERSION,
                         0, 0, 0, 0, 0, 0, 0);
        }

err_not_supported:
        SMC_RET1(smc_handle, (uint32_t)FFA_ERROR_NOT_SUPPORTED);
}

/**
 * trusty_ffa_features - FFA_FEATURES implementation.
 * @client:     Client state.
 * @func:       Api to check.
 * @props:      Input properties.
 * @ret2:       Pointer to return value2 on success.
 * @ret3:       Pointer to return value3 on success.
 *
 * Return: 0 on success, error code on failure.
 */
static int trusty_ffa_features(struct trusty_shmem_client_state *client,
                               uint32_t func, uint32_t props,
                               u_register_t *ret2, u_register_t *ret3)
{
        if (SMC_ENTITY(func) != SMC_ENTITY_SHARED_MEMORY ||
            !SMC_IS_FASTCALL(func)) {
                return -EINVAL;
        }
        switch (func) {
        case FFA_ERROR:
        case FFA_SUCCESS_SMC32:
        case FFA_VERSION:
        case FFA_FEATURES:
        case FFA_RXTX_UNMAP:
        case FFA_ID_GET:
        case FFA_MEM_RETRIEVE_RESP:
        case FFA_MEM_FRAG_RX:
        case FFA_MEM_FRAG_TX:
                return 0;

        case FFA_RXTX_MAP_SMC32:
        case FFA_RXTX_MAP_SMC64:
                *ret2 = FFA_FEATURES2_RXTX_MAP_BUF_SIZE_4K;
                return 0;

        case FFA_MEM_RETRIEVE_REQ_SMC32:
        case FFA_MEM_RETRIEVE_REQ_SMC64:
                /*
                 * Indicate that an object can be retrieved up to 2^64 - 1
                 * times (on a 64 bit build). We track the number of times an
                 * object has been retrieved in a variable of type size_t.
                 */
                *ret3 = sizeof(size_t) * 8 - 1;
                *ret2 = 0;
                if (props & FFA_FEATURES2_MEM_RETRIEVE_REQ_NS_BIT) {
                        *ret2 |= FFA_FEATURES2_MEM_RETRIEVE_REQ_NS_BIT;
                        client->use_ns_bit = true;
                }
                return 0;

        case FFA_MEM_SHARE_SMC32:
        case FFA_MEM_SHARE_SMC64:
        case FFA_MEM_RELINQUISH:
        case FFA_MEM_RECLAIM:
                *ret2 = 0;
                return 0;

        default:
                return -ENOTSUP;
        }
}

/**
 * to_spi_err - Convert from local error code to FF-A error code.
 * @ret:        Local error code.
 *
 * Return: FF-A defined error code.
 */
static int to_spi_err(long ret)
{
        switch (ret) {
        case -ENOMEM:
                return FFA_ERROR_NO_MEMORY;
        case -EINVAL:
        case -ENOENT:
                return FFA_ERROR_INVALID_PARAMETER;
        case -EACCES:
                return FFA_ERROR_DENIED;
        case -ENOTSUP:
                return FFA_ERROR_NOT_SUPPORTED;
        default:
                return FFA_ERROR_INVALID_PARAMETER;
        }
}

/*
 * spmd_ffa_smc_handler - SMC call handler.
 */
uintptr_t spmd_ffa_smc_handler(uint32_t smc_fid,
                               u_register_t x1,
                               u_register_t x2,
                               u_register_t x3,
                               u_register_t x4,
                               void *cookie,
                               void *handle,
                               u_register_t flags)
{
        long ret = -1;
        /*
         * Some arguments to FF-A functions are specified to come from 32 bit
         * (w) registers. Create 32 bit copies of the 64 bit arguments that can
         * be passed to these functions.
         */
        uint32_t w1 = (uint32_t)x1;
        uint32_t w2 = (uint32_t)x2;
        uint32_t w3 = (uint32_t)x3;
        uint32_t w4 = (uint32_t)x4;
        u_register_t ret_reg2 = 0;
        u_register_t ret_reg3 = 0;
        struct trusty_shmem_client_state *client;

        if (((smc_fid < SMC_FC32_FFA_MIN) || (smc_fid > SMC_FC32_FFA_MAX)) &&
            ((smc_fid < SMC_FC64_FFA_MIN) || (smc_fid > SMC_FC64_FFA_MAX))) {
                NOTICE("%s(0x%x) unknown smc\n", __func__, smc_fid);
                SMC_RET1(handle, SMC_UNK);
        }

        spin_lock(&trusty_shmem_obj_state.lock);

        if (is_caller_secure(flags)) {
                client = trusty_sp.client;
        } else {
                client = trusty_ns_client.client;
        }

        switch (smc_fid) {
        case FFA_VERSION:
                ret = trusty_ffa_version(client, w1, handle);
                break;

        case FFA_FEATURES:
                ret = trusty_ffa_features(client, w1, w2, &ret_reg2,
                                          &ret_reg3);
                break;

        case FFA_RXTX_MAP_SMC32:
                ret = trusty_ffa_rxtx_map(client, w1, w2, w3);
                break;

        case FFA_RXTX_MAP_SMC64:
                ret = trusty_ffa_rxtx_map(client, x1, x2, w3);
                break;

        case FFA_RXTX_UNMAP:
                ret = trusty_ffa_rxtx_unmap(client, w1);
                break;

        case FFA_RX_RELEASE:
                ret = trusty_ffa_rx_release(client);
                break;

        case FFA_PARTITION_INFO_GET:
                ret = trusty_ffa_partition_info_get(client, w1, w2, w3, w4,
                                                    &ret_reg2, &ret_reg3);
                break;

        case FFA_ID_GET:
                ret = trusty_ffa_id_get(flags, &ret_reg2);
                break;

        case FFA_MEM_LEND_SMC32:
                ret = trusty_ffa_mem_share(client, w1, w2, w3, w4,
                                           FFA_MTD_FLAG_TYPE_LEND_MEMORY,
                                           handle);
                break;

        case FFA_MEM_LEND_SMC64:
                ret = trusty_ffa_mem_share(client, w1, w2, x3, w4,
                                           FFA_MTD_FLAG_TYPE_LEND_MEMORY,
                                           handle);
                break;

        case FFA_MEM_SHARE_SMC32:
                ret = trusty_ffa_mem_share(client, w1, w2, w3, w4,
                                           FFA_MTD_FLAG_TYPE_SHARE_MEMORY,
                                           handle);
                break;

        case FFA_MEM_SHARE_SMC64:
                ret = trusty_ffa_mem_share(client, w1, w2, x3, w4,
                                           FFA_MTD_FLAG_TYPE_SHARE_MEMORY,
                                           handle);
                break;

        case FFA_MEM_RETRIEVE_REQ_SMC32:
                ret = trusty_ffa_mem_retrieve_req(client, w1, w2, w3, w4,
                                                  handle);
                break;

        case FFA_MEM_RETRIEVE_REQ_SMC64:
                ret = trusty_ffa_mem_retrieve_req(client, w1, w2, x3, w4,
                                                  handle);
                break;

        case FFA_MEM_RELINQUISH:
                ret = trusty_ffa_mem_relinquish(client);
                break;

        case FFA_MEM_RECLAIM:
                ret = trusty_ffa_mem_reclaim(client, w1, w2, w3);
                break;

        case FFA_MEM_FRAG_RX:
                ret = trusty_ffa_mem_frag_rx(client, w1, w2, w3, w4, handle);
                break;

        case FFA_MEM_FRAG_TX:
                ret = trusty_ffa_mem_frag_tx(client, w1, w2, w3, w4, handle);
                break;

        default:
                NOTICE("%s(0x%x, 0x%lx) unsupported ffa smc\n", __func__,
                       smc_fid, x1);
                ret = -ENOTSUP;
                break;
        }
        spin_unlock(&trusty_shmem_obj_state.lock);

        if (ret) {
                if (ret == (int64_t)handle) {
                        /* return value already encoded, pass through */
                        return ret;
                }
                NOTICE("%s(0x%x) failed %ld\n", __func__, smc_fid, ret);
                SMC_RET8(handle, FFA_ERROR, 0, to_spi_err(ret), 0, 0, 0,
                         0, 0);
        } else {
                SMC_RET8(handle, FFA_SUCCESS_SMC32, 0, ret_reg2, ret_reg3, 0,
                         0, 0, 0);
        }
}

/**
 * trusty_shared_mem_init - Initialize Trusty secure and non-secure endpoints.
 * @trusty_uuid:        Pointer to Trusty UUID.
 *
 * Return: none.
 */
void trusty_shared_mem_init(const uuid_t *trusty_uuid)
{
        /*
         * Initialize secure side of Trusty that implements the FFA_MEM ABIs.
         */
        trusty_sp.sp_id = FFA_SWLD_ID_BASE;
        trusty_sp.uuid = trusty_uuid;
        trusty_sp.properties = 0; /* Doesn't support DIRECT MSG */
        trusty_sp.ffa_version = MAKE_TRUSTY_FFA_CURRENT_VERSION;
        trusty_sp.client = &trusty_shmem_client_state[true];

        /*
         * Initialize non-secure client endpoint.
         */
        trusty_ns_client.ep_id = FFA_NWLD_ID_BASE;
        trusty_ns_client.client = &trusty_shmem_client_state[false];
}