/*
 * Copyright (c) 2014-2015, Google, Inc. All rights reserved
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files
 * (the "Software"), to deal in the Software without restriction,
 * including without limitation the rights to use, copy, modify, merge,
 * publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so,
 * subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
 * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <assert.h>
#include <compiler.h>
#include <err.h>
#include <kernel/mutex.h>
#include <kernel/thread.h>
#include <reflist.h>
#include <stddef.h>
#include <stdlib.h>
#include <string.h>
#include <trace.h>

#include <kernel/vm.h>
#include <lk/init.h>
#include <lk/reflist.h>

#include <virtio/virtio_config.h>
#include <virtio/virtio_ring.h>
#include "vqueue.h"

#include "trusty_virtio.h"

#include <lib/trusty/handle.h>
#include <lib/trusty/ipc.h>
#include <lib/trusty/ipc_msg.h>
#include <lib/trusty/memref.h>
#include <lib/trusty/tipc_virtio_dev.h>

#include <inttypes.h>
#include <uapi/mm.h>

#define LOCAL_TRACE 0

#define MAX_RX_IOVS 1
#define MAX_TX_IOVS 1
/*
 * Control endpoint address
 */
#define TIPC_CTRL_ADDR (53)

/*
 * Max number of opened channels supported
 */
#define TIPC_ADDR_MAX_NUM 256

/*
 * Local addresses base
 */
#define TIPC_ADDR_BASE 1000

/*
 * Maximum service name size
 */
#define TIPC_MAX_SRV_NAME_LEN (256)
struct tipc_ept {
    uint32_t remote;
    struct handle* chan;
};

struct tipc_dev {
    struct vdev vd;
    const uuid_t* uuid;
    const void* descr_ptr;
    size_t descr_size;

    struct vqueue vqs[TIPC_VQ_NUM];
    struct tipc_ept epts[TIPC_ADDR_MAX_NUM];
    unsigned long inuse[BITMAP_NUM_WORDS(TIPC_ADDR_MAX_NUM)];
    struct vqueue_mapped_list send_mapped;
    struct vqueue_mapped_list receive_mapped;

    event_t have_handles;
    struct handle_list handle_list;

    mutex_t ept_lock;

    thread_t* rx_thread;
    thread_t* tx_thread;

    bool tx_stop;
    bool rx_stop;
    bool reuse_mapping;
};

struct tipc_shm {
    uint64_t obj_id;
    uint64_t size;
    uint64_t tag;
} __PACKED;

struct tipc_hdr {
    uint32_t src;
    uint32_t dst;
    uint16_t reserved;
    uint16_t shm_cnt;
    uint16_t len;
    uint16_t flags;
    uint8_t data[0];
} __PACKED;
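
/*
 * Example wire layout (illustrative, not normative): a message carrying
 * 4 payload bytes and one shared memory object occupies a single RX
 * buffer as
 *
 *   struct tipc_hdr   (16 bytes; shm_cnt = 1,
 *                      len = 4 + sizeof(struct tipc_shm) = 28)
 *   uint8_t data[4]   (payload bytes)
 *   struct tipc_shm   (24 bytes: obj_id, size, tag)
 *
 * i.e. 'len' counts both the payload and the trailing tipc_shm
 * descriptors; handle_rx_msg() below recovers the payload length as
 * len - shm_cnt * sizeof(struct tipc_shm).
 */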

enum tipc_ctrl_msg_types {
    TIPC_CTRL_MSGTYPE_GO_ONLINE = 1,
    TIPC_CTRL_MSGTYPE_GO_OFFLINE,
    TIPC_CTRL_MSGTYPE_CONN_REQ,
    TIPC_CTRL_MSGTYPE_CONN_RSP,
    TIPC_CTRL_MSGTYPE_DISC_REQ,
    TIPC_CTRL_MSGTYPE_RELEASE,
    TIPC_CTRL_MSGTYPE_REUSE_MSGBUF_REQ,
    TIPC_CTRL_MSGTYPE_REUSE_MSGBUF_RSP,
    TIPC_CTRL_MSGTYPE_UNMAP_REQ,
    TIPC_CTRL_MSGTYPE_UNMAP_RSP,
};
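
/*
 * Typical control-message flow, as implemented below (a sketch; the
 * non-secure side drives connection setup):
 *
 *   secure side                      non-secure side
 *   GO_ONLINE           ------>      (device is ready)
 *                       <------      CONN_REQ (named service)
 *   CONN_RSP            ------>      (status, addresses, msg limits)
 *                       <------      data messages (may carry tipc_shm)
 *   DISC_REQ / RELEASE  <----->      (teardown, shm reclaim)
 */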
137
138 /*
139 * TIPC control message consists of common tipc_ctrl_msg_hdr
140 * immediately followed by message specific body which also
141 * could be empty.
142 *
143 * struct tipc_ctrl_msg {
144 * struct tipc_ctrl_msg_hdr hdr;
145 * uint8_t body[0];
146 * } __PACKED;
147 *
148 */
149 struct tipc_ctrl_msg_hdr {
150 uint32_t type;
151 uint32_t body_len;
152 } __PACKED;
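
/*
 * Control messages are framed on the stack by the senders below; e.g.
 * a connection response (a sketch mirroring send_conn_rsp()):
 *
 *   struct {
 *       struct tipc_ctrl_msg_hdr hdr;
 *       struct tipc_conn_rsp_body body;
 *   } msg;
 *
 *   msg.hdr.type = TIPC_CTRL_MSGTYPE_CONN_RSP;
 *   msg.hdr.body_len = sizeof(msg.body);
 *   ...fill msg.body...
 *   tipc_send_buf(dev, TIPC_CTRL_ADDR, TIPC_CTRL_ADDR, &msg, sizeof(msg),
 *                 true);
 *
 * Device-level control traffic uses TIPC_CTRL_ADDR for both src and dst.
 */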

struct tipc_conn_req_body {
    char name[TIPC_MAX_SRV_NAME_LEN];
} __PACKED;

struct tipc_conn_rsp_body {
    uint32_t target;
    int32_t status;
    uint32_t remote;
    uint32_t max_msg_size;
    uint32_t max_msg_cnt;
} __PACKED;

struct tipc_disc_req_body {
    uint32_t target;
} __PACKED;

struct tipc_release_body {
    uint64_t id;
} __PACKED;

struct tipc_unmap_req_body {
    uint64_t id;
} __PACKED;

struct tipc_unmap_rsp_body {
    int32_t result;
    uint64_t id;
} __PACKED;

typedef int (*tipc_data_cb_t)(uint8_t* dst, size_t sz, void* ctx);
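
/*
 * A tipc_data_cb_t fills 'dst' with up to 'sz' bytes of payload and
 * returns the number of bytes written, or a negative error code, e.g.
 * (a minimal sketch; _send_buf() below is the buffer-backed callback
 * actually used by tipc_send_buf()):
 *
 *   static int my_cb(uint8_t* dst, size_t sz, void* ctx) {
 *       memcpy(dst, ctx, sz);
 *       return (int)sz;
 *   }
 */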

struct tipc_ext_mem {
    struct vmm_obj vmm_obj;
    struct vmm_obj* ext_mem;
    struct obj_ref ext_mem_ref;
    struct tipc_dev* dev;
    bool suppress_release;
};
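
/*
 * tipc_ext_mem wraps an external memory vmm_obj: check_flags() and
 * get_page() are delegated to the wrapped object, while destroy()
 * additionally notifies the NS side via release_shm() that the memory
 * may be reclaimed, unless suppress_release is set.
 */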

static int tipc_ext_mem_check_flags(struct vmm_obj* obj,
                                    uint* arch_mmu_flags) {
    struct tipc_ext_mem* tem = containerof(obj, struct tipc_ext_mem, vmm_obj);
    return tem->ext_mem->ops->check_flags(tem->ext_mem, arch_mmu_flags);
}

static int tipc_ext_mem_get_page(struct vmm_obj* obj,
                                 size_t offset,
                                 paddr_t* paddr,
                                 size_t* paddr_size) {
    struct tipc_ext_mem* tem = containerof(obj, struct tipc_ext_mem, vmm_obj);
    return tem->ext_mem->ops->get_page(tem->ext_mem, offset, paddr, paddr_size);
}

status_t release_shm(struct tipc_dev* dev, uint64_t shm_id);

void tipc_ext_mem_destroy(struct vmm_obj* obj) {
    struct tipc_ext_mem* tem = containerof(obj, struct tipc_ext_mem, vmm_obj);
    struct ext_mem_obj* ext_mem =
            containerof(tem->ext_mem, struct ext_mem_obj, vmm_obj);
    /* Save the ext_mem ID as we're about to drop a reference to it */
    ext_mem_obj_id_t ext_mem_id = ext_mem->id;
    vmm_obj_del_ref(tem->ext_mem, &tem->ext_mem_ref);
    /* In case this was the last reference, tell NS to try reclaiming */
    if (!tem->suppress_release) {
        if (release_shm(tem->dev, ext_mem_id) != NO_ERROR) {
            TRACEF("Failed to release external memory: 0x%" PRIx64 "\n",
                   ext_mem_id);
        }
    }
    free(tem);
}

static struct vmm_obj_ops tipc_ext_mem_ops = {
        .check_flags = tipc_ext_mem_check_flags,
        .get_page = tipc_ext_mem_get_page,
        .destroy = tipc_ext_mem_destroy,
};

static bool vmm_obj_is_tipc_ext_mem(struct vmm_obj* obj) {
    return obj->ops == &tipc_ext_mem_ops;
}

static struct tipc_ext_mem* vmm_obj_to_tipc_ext_mem(struct vmm_obj* obj) {
    if (vmm_obj_is_tipc_ext_mem(obj)) {
        return containerof(obj, struct tipc_ext_mem, vmm_obj);
    } else {
        return NULL;
    }
}

struct vmm_obj* tipc_ext_mem_vmm_obj_to_ext_mem_vmm_obj(struct vmm_obj* obj) {
    struct tipc_ext_mem* tem = vmm_obj_to_tipc_ext_mem(obj);
    if (!tem) {
        return NULL;
    }
    return tem->ext_mem;
}

static void tipc_ext_mem_initialize(struct tipc_ext_mem* tem,
                                    struct tipc_dev* dev,
                                    struct vmm_obj* ext_mem,
                                    struct obj_ref* ref) {
    tem->dev = dev;
    tem->ext_mem = ext_mem;
    vmm_obj_add_ref(tem->ext_mem, &tem->ext_mem_ref);
    tem->vmm_obj.ops = &tipc_ext_mem_ops;
    obj_init(&tem->vmm_obj.obj, ref);
}

static int tipc_send_data(struct tipc_dev* dev,
                          uint32_t local,
                          uint32_t remote,
                          tipc_data_cb_t cb,
                          void* cb_ctx,
                          uint16_t data_len,
                          bool wait);

static int tipc_send_buf(struct tipc_dev* dev,
                         uint32_t local,
                         uint32_t remote,
                         void* data,
                         uint16_t data_len,
                         bool wait);

#define vdev_to_dev(v) containerof((v), struct tipc_dev, vd)

static inline uint addr_to_slot(uint32_t addr) {
    if (addr < TIPC_ADDR_BASE) {
        TRACEF("bad addr %u\n", addr);
        return TIPC_ADDR_MAX_NUM; /* return invalid slot number */
    }
    return (uint)(addr - TIPC_ADDR_BASE);
}

static inline uint32_t slot_to_addr(uint slot) {
    return (uint32_t)(slot + TIPC_ADDR_BASE);
}
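
/*
 * E.g. endpoint slot 0 corresponds to local address 1000 (TIPC_ADDR_BASE)
 * and slot 255 to address 1255; anything below TIPC_ADDR_BASE (such as
 * TIPC_CTRL_ADDR) is never a valid endpoint address, and addr_to_slot()
 * maps it to the invalid slot number TIPC_ADDR_MAX_NUM.
 */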

static uint32_t alloc_local_addr(struct tipc_dev* dev,
                                 uint32_t remote,
                                 struct handle* chan) {
    int slot = bitmap_ffz(dev->inuse, TIPC_ADDR_MAX_NUM);
    if (slot >= 0) {
        bitmap_set(dev->inuse, slot);
        dev->epts[slot].chan = chan;
        dev->epts[slot].remote = remote;
        return slot_to_addr(slot);
    }
    return 0;
}

static struct tipc_ept* lookup_ept(struct tipc_dev* dev, uint32_t local) {
    uint slot = addr_to_slot(local);
    if (slot < TIPC_ADDR_MAX_NUM) {
        if (bitmap_test(dev->inuse, slot)) {
            return &dev->epts[slot];
        }
    }
    return NULL;
}

static uint32_t ept_to_addr(struct tipc_dev* dev, struct tipc_ept* ept) {
    return slot_to_addr(ept - dev->epts);
}

static void free_local_addr(struct tipc_dev* dev, uint32_t local) {
    uint slot = addr_to_slot(local);

    if (slot < TIPC_ADDR_MAX_NUM) {
        bitmap_clear(dev->inuse, slot);
        dev->epts[slot].chan = NULL;
        dev->epts[slot].remote = 0;
    }
}

static int _go_online(struct tipc_dev* dev) {
    struct {
        struct tipc_ctrl_msg_hdr hdr;
        /* body is empty */
    } msg;

    msg.hdr.type = TIPC_CTRL_MSGTYPE_GO_ONLINE;
    msg.hdr.body_len = 0;

    return tipc_send_buf(dev, TIPC_CTRL_ADDR, TIPC_CTRL_ADDR, &msg, sizeof(msg),
                         true);
}

/*
 * When getting a notify for the TX vq, it is the other side telling us
 * that buffers are now available
 */
static int tipc_tx_vq_notify_cb(struct vqueue* vq, void* priv) {
    vqueue_signal_avail(vq);
    return 0;
}

static int tipc_rx_vq_notify_cb(struct vqueue* vq, void* priv) {
    vqueue_signal_avail(vq);
    return 0;
}

static const vqueue_cb_t notify_cbs[TIPC_VQ_NUM] = {
        [TIPC_VQ_TX] = tipc_tx_vq_notify_cb,
        [TIPC_VQ_RX] = tipc_rx_vq_notify_cb,
};

static int send_conn_rsp(struct tipc_dev* dev,
                         uint32_t local,
                         uint32_t remote,
                         int32_t status,
                         uint32_t msg_sz,
                         uint32_t msg_cnt) {
    struct {
        struct tipc_ctrl_msg_hdr hdr;
        struct tipc_conn_rsp_body body;
    } msg;

    msg.hdr.type = TIPC_CTRL_MSGTYPE_CONN_RSP;
    msg.hdr.body_len = sizeof(msg.body);

    msg.body.target = remote;
    msg.body.status = status;
    msg.body.remote = local;
    msg.body.max_msg_size = msg_sz;
    msg.body.max_msg_cnt = msg_cnt;

    return tipc_send_buf(dev, TIPC_CTRL_ADDR, TIPC_CTRL_ADDR, &msg, sizeof(msg),
                         true);
}

static int send_disc_req(struct tipc_dev* dev,
                         uint32_t local,
                         uint32_t remote) {
    struct {
        struct tipc_ctrl_msg_hdr hdr;
        struct tipc_disc_req_body body;
    } msg;

    msg.hdr.type = TIPC_CTRL_MSGTYPE_DISC_REQ;
    msg.hdr.body_len = sizeof(msg.body);

    msg.body.target = remote;

    return tipc_send_buf(dev, local, TIPC_CTRL_ADDR, &msg, sizeof(msg), true);
}

static int handle_conn_req(struct tipc_dev* dev,
                           uint32_t remote,
                           const volatile struct tipc_conn_req_body* ns_req) {
    int err;
    uint32_t local = 0;
    struct handle* chan = NULL;
    struct tipc_conn_req_body req;

    LTRACEF("remote %u\n", remote);

    strncpy(req.name, (const char*)ns_req->name, sizeof(req.name));

    /* open ipc channel */
    err = ipc_port_connect_async(dev->uuid, req.name, sizeof(req.name), 0,
                                 &chan);
    if (err == NO_ERROR) {
        mutex_acquire(&dev->ept_lock);
        local = alloc_local_addr(dev, remote, chan);
        if (local == 0) {
            TRACEF("failed to alloc local address\n");
            handle_decref(chan);
            chan = NULL;
        }
        mutex_release(&dev->ept_lock);
    }

    if (chan) {
        LTRACEF("new handle: local = 0x%x remote = 0x%x\n", local, remote);
        handle_set_cookie(chan, lookup_ept(dev, local));
        handle_list_add(&dev->handle_list, chan);
        event_signal(&dev->have_handles, false);
        return NO_ERROR;
    }

    err = send_conn_rsp(dev, local, remote, ERR_NO_RESOURCES, 0, 0);
    if (err) {
        TRACEF("failed (%d) to send response\n", err);
    }

    return err;
}

static int handle_disc_req(struct tipc_dev* dev,
                           uint32_t remote,
                           const volatile struct tipc_disc_req_body* ns_req) {
    struct tipc_ept* ept;
    uint32_t target = ns_req->target;

    LTRACEF("remote %u: target %u\n", remote, target);

    mutex_acquire(&dev->ept_lock);

    /* Ultimately we have to look up the channel by remote address.
     * The local address is also provided by the remote side, but there
     * is a scenario in which it might not be valid. Nevertheless,
     * we can try it first before doing a full lookup.
     */
    ept = lookup_ept(dev, target);
    if (!ept || ept->remote != remote) {
        ept = NULL;
        /* do full search: TODO search handle list */
        for (uint slot = 0; slot < countof(dev->epts); slot++) {
            if (bitmap_test(dev->inuse, slot)) {
                if (dev->epts[slot].remote == remote) {
                    ept = &dev->epts[slot];
                    break;
                }
            }
        }
    }

    if (ept) {
        struct handle* chan = ept->chan;

        if (chan) {
            /* detach handle from handle list */
            handle_list_del(&dev->handle_list, chan);

            /* detach ept */
            handle_set_cookie(chan, NULL);

            /* close handle */
            handle_decref(chan);
        }

        free_local_addr(dev, ept_to_addr(dev, ept));
    }

    mutex_release(&dev->ept_lock);

    return NO_ERROR;
}

static int handle_reuse_msgbuf_req(struct tipc_dev* dev, uint32_t remote) {
    struct {
        struct tipc_ctrl_msg_hdr hdr;
    } msg;

    /* enable message buffer mapping reuse */
    dev->reuse_mapping = true;

    /* send response */
    msg.hdr.type = TIPC_CTRL_MSGTYPE_REUSE_MSGBUF_RSP;
    msg.hdr.body_len = 0;

    return tipc_send_buf(dev, TIPC_CTRL_ADDR, TIPC_CTRL_ADDR, &msg, sizeof(msg),
                         true);
}

static int handle_unmap_req(struct tipc_dev* dev,
                            uint32_t remote,
                            const volatile struct tipc_unmap_req_body* ns_req) {
    struct vqueue_mapped_list* mapped[2];
    struct {
        struct tipc_ctrl_msg_hdr hdr;
        struct tipc_unmap_rsp_body body;
    } msg;

    mapped[0] = &dev->send_mapped;
    mapped[1] = &dev->receive_mapped;

    /* try to unmap */
    msg.body.result = vqueue_unmap_memid(ns_req->id, mapped, 2);

    /* copy id from request to response so that host can reclaim */
    msg.body.id = ns_req->id;

    /* send response */
    msg.hdr.type = TIPC_CTRL_MSGTYPE_UNMAP_RSP;
    msg.hdr.body_len = sizeof(msg.body);

    return tipc_send_buf(dev, TIPC_CTRL_ADDR, TIPC_CTRL_ADDR, &msg, sizeof(msg),
                         true);
}

static int handle_ctrl_msg(struct tipc_dev* dev,
                           uint32_t remote,
                           const volatile void* ns_data,
                           size_t msg_len) {
    uint32_t msg_type;
    size_t msg_body_len;
    const volatile void* ns_msg_body;
    const volatile struct tipc_ctrl_msg_hdr* ns_msg_hdr = ns_data;

    DEBUG_ASSERT(ns_data);

    /* do some safety checks */
    if (msg_len < sizeof(struct tipc_ctrl_msg_hdr)) {
        TRACEF("%s: remote=%u: ttl_len=%zu\n", "malformed msg", remote,
               msg_len);
        return ERR_NOT_VALID;
    }

    msg_type = ns_msg_hdr->type;
    msg_body_len = ns_msg_hdr->body_len;
    ns_msg_body = ns_data + sizeof(struct tipc_ctrl_msg_hdr);

    if (sizeof(struct tipc_ctrl_msg_hdr) + msg_body_len != msg_len)
        goto err_malformed_msg;

    switch (msg_type) {
    case TIPC_CTRL_MSGTYPE_CONN_REQ:
        if (msg_body_len != sizeof(struct tipc_conn_req_body))
            break;
        return handle_conn_req(dev, remote, ns_msg_body);

    case TIPC_CTRL_MSGTYPE_DISC_REQ:
        if (msg_body_len != sizeof(struct tipc_disc_req_body))
            break;
        return handle_disc_req(dev, remote, ns_msg_body);

    case TIPC_CTRL_MSGTYPE_REUSE_MSGBUF_REQ:
        if (msg_body_len != 0)
            break;
        return handle_reuse_msgbuf_req(dev, remote);

    case TIPC_CTRL_MSGTYPE_UNMAP_REQ:
        if (msg_body_len != sizeof(struct tipc_unmap_req_body))
            break;
        return handle_unmap_req(dev, remote, ns_msg_body);

    default:
        break;
    }

err_malformed_msg:
    TRACEF("%s: remote=%u: ttl_len=%zu msg_type=%u msg_len=%zu\n",
           "malformed msg", remote, msg_len, msg_type, msg_body_len);
    return ERR_NOT_VALID;
}

/*
 * Sets the suppression flag on a memref handle backed by a vmm_obj of a
 * tipc_ext_mem.
 */
static void suppress_handle(struct handle* handle) {
    struct vmm_obj* obj = memref_handle_to_vmm_obj(handle);
    ASSERT(obj);
    struct tipc_ext_mem* tem = vmm_obj_to_tipc_ext_mem(obj);
    ASSERT(tem);
    tem->suppress_release = true;
}

static int handle_chan_msg(struct tipc_dev* dev,
                           uint32_t remote,
                           uint32_t local,
                           const volatile void* ns_data,
                           size_t len,
                           const volatile struct tipc_shm* shm,
                           size_t shm_cnt) {
    struct tipc_ept* ept;
    int ret = ERR_NOT_FOUND;
    size_t shm_idx = 0;
    struct handle* handles[MAX_MSG_HANDLES];
    struct ipc_msg_kern msg = {
            .iov =
                    (struct iovec_kern[]){
                            [0] =
                                    {
                                            .iov_base = (void*)ns_data,
                                            .iov_len = len,
                                    },
                    },
            .num_iov = 1,
            .handles = handles,
            .num_handles = shm_cnt,
    };

    LTRACEF("len=%zu, shm_cnt=%zu\n", len, shm_cnt);

    if (shm_cnt > MAX_MSG_HANDLES) {
        return ERR_INVALID_ARGS;
    }

    for (shm_idx = 0; shm_idx < shm_cnt; shm_idx++) {
        struct vmm_obj* shm_obj;
        struct obj_ref shm_ref;
        struct tipc_ext_mem* tem;
        struct obj_ref tem_ref;
        /*
         * Read out separately to prevent it from changing between the two calls
         */
        uint64_t size64 = READ_ONCE(shm[shm_idx].size);
        if (size64 > SIZE_MAX) {
            TRACEF("Received shm object larger than SIZE_MAX\n");
            goto out;
        }
        size_t size = size64;

        obj_ref_init(&shm_ref);
        obj_ref_init(&tem_ref);

        status_t ret =
                ext_mem_get_vmm_obj(dev->vd.client_id, shm[shm_idx].obj_id,
                                    shm[shm_idx].tag, size, &shm_obj, &shm_ref);
        if (ret < 0) {
            TRACEF("Failed to create ext_mem object\n");
            goto out;
        }

        tem = calloc(1, sizeof(struct tipc_ext_mem));
        if (!tem) {
            TRACEF("calloc() failed\n");
            ret = ERR_NO_MEMORY;
            goto out;
        }

        tipc_ext_mem_initialize(tem, dev, shm_obj, &tem_ref);
        vmm_obj_del_ref(shm_obj, &shm_ref);
        shm_obj = NULL;

        /* Temporarily set ext_mem_obj match_tag so memref can be created */
        ext_mem_obj_set_match_tag(tem->ext_mem, shm[shm_idx].tag);

        ret = memref_create_from_vmm_obj(
                &tem->vmm_obj, 0, size,
                MMAP_FLAG_PROT_READ | MMAP_FLAG_PROT_WRITE, &handles[shm_idx]);
        if (ret != NO_ERROR) {
            tem->suppress_release = true;
        }

        /* Clear match_tag so non-0 tags are unmappable by default */
        ext_mem_obj_set_match_tag(tem->ext_mem, 0);

        /*
         * We want to release our local ref whether or not we made a handle
         * successfully. If we made a handle, the handle's ref keeps it alive.
         * If we didn't, we want it cleaned up.
         */
        vmm_obj_del_ref(&tem->vmm_obj, &tem_ref);

        if (ret < 0) {
            TRACEF("Failed to create memref\n");
            goto out;
        }
    }

    mutex_acquire(&dev->ept_lock);
    ept = lookup_ept(dev, local);
    if (ept && ept->remote == remote) {
        if (ept->chan) {
            ret = ipc_send_msg(ept->chan, &msg);
        }
    }
    mutex_release(&dev->ept_lock);

out:
    /* Tear down the successfully processed handles */
    while (shm_idx > 0) {
        shm_idx--;
        if (ret < 0) {
            LTRACEF("Suppressing handle release\n");
            suppress_handle(handles[shm_idx]);
        }
        handle_decref(handles[shm_idx]);
    }

    return ret;
}

static int handle_rx_msg(struct tipc_dev* dev, struct vqueue_buf* buf) {
    const volatile struct tipc_hdr* ns_hdr;
    const volatile void* ns_data;
    const volatile struct tipc_shm* ns_shm;
    size_t ns_shm_cnt;
    size_t ns_shm_len;
    size_t ns_data_len;
    uint32_t src_addr;
    uint32_t dst_addr;

    DEBUG_ASSERT(dev);
    DEBUG_ASSERT(buf);

    LTRACEF("got RX buf: head %hu buf in %d out %d\n", buf->head,
            buf->in_iovs.used, buf->out_iovs.used);

    /* we will need at least 1 iovec */
    if (buf->in_iovs.used == 0) {
        TRACEF("unexpected in_iovs num %d\n", buf->in_iovs.used);
        return ERR_INVALID_ARGS;
    }

    /* there should be exactly 1 in_iov, but it is not fatal if the first
       one is big enough */
    if (buf->in_iovs.used != 1) {
        TRACEF("unexpected in_iovs num %d\n", buf->in_iovs.used);
    }

    /* out_iovs are not supported: just log a message and ignore them */
    if (buf->out_iovs.used != 0) {
        TRACEF("unexpected out_iovs num %d\n", buf->out_iovs.used);
    }

    /* map in_iovs: non-secure, no-execute, cached, read-only */
    uint map_flags = ARCH_MMU_FLAG_PERM_NO_EXECUTE | ARCH_MMU_FLAG_PERM_RO;
    int ret = vqueue_map_iovs(dev->vd.client_id, &buf->in_iovs, map_flags,
                              &dev->receive_mapped);
    if (ret) {
        TRACEF("failed to map iovs %d\n", ret);
        return ret;
    }

    /* check message size */
    if (buf->in_iovs.iovs[0].iov_len < sizeof(struct tipc_hdr)) {
        TRACEF("msg too short %zu\n", buf->in_iovs.iovs[0].iov_len);
        ret = ERR_INVALID_ARGS;
        goto done;
    }

    ns_hdr = buf->in_iovs.iovs[0].iov_base;
    ns_data = buf->in_iovs.iovs[0].iov_base + sizeof(struct tipc_hdr);
    ns_shm_cnt = ns_hdr->shm_cnt;
    ns_shm_len = ns_shm_cnt * sizeof(*ns_shm);
    ns_data_len = ns_hdr->len - ns_shm_len;
    ns_shm = ns_data + ns_data_len;
    src_addr = ns_hdr->src;
    dst_addr = ns_hdr->dst;

    if (ns_shm_len + ns_data_len + sizeof(struct tipc_hdr) !=
        buf->in_iovs.iovs[0].iov_len) {
        TRACEF("malformed message len %zu shm_len %zu msglen %zu\n",
               ns_data_len, ns_shm_len, buf->in_iovs.iovs[0].iov_len);
        ret = ERR_INVALID_ARGS;
        goto done;
    }

    if (dst_addr == TIPC_CTRL_ADDR) {
        if (ns_shm_cnt != 0) {
            TRACEF("sent message with shared memory objects to control address\n");
            /* go through 'done' so the iov mapping is released */
            ret = ERR_INVALID_ARGS;
            goto done;
        }
        ret = handle_ctrl_msg(dev, src_addr, ns_data, ns_data_len);
    } else {
        ret = handle_chan_msg(dev, src_addr, dst_addr, ns_data, ns_data_len,
                              ns_shm, ns_shm_cnt);
    }

done:
    if (!dev->reuse_mapping) {
        vqueue_unmap_iovs(&buf->in_iovs, &dev->receive_mapped);
    }

    return ret;
}

static int tipc_rx_thread_func(void* arg) {
    struct tipc_dev* dev = arg;
    ext_mem_obj_id_t in_shared_mem_id[MAX_RX_IOVS];
    struct iovec_kern in_iovs[MAX_RX_IOVS];
    struct vqueue* vq = &dev->vqs[TIPC_VQ_RX];
    struct vqueue_buf buf;
    int ret = NO_ERROR;

    LTRACEF("enter\n");

    memset(&buf, 0, sizeof(buf));

    buf.in_iovs.cnt = MAX_RX_IOVS;
    buf.in_iovs.shared_mem_id = in_shared_mem_id;
    buf.in_iovs.iovs = in_iovs;

    while (!dev->rx_stop) {
        /* wait for next available buffer */
        event_wait(&vq->avail_event);

        ret = vqueue_get_avail_buf(vq, &buf);

        if (ret == ERR_CHANNEL_CLOSED)
            break; /* need to terminate */

        if (ret == ERR_NOT_ENOUGH_BUFFER)
            continue; /* no new messages */

        if (likely(ret == NO_ERROR)) {
            ret = handle_rx_msg(dev, &buf);
        }

        ret = vqueue_add_buf(vq, &buf, (uint32_t)ret);
        if (ret == ERR_CHANNEL_CLOSED)
            break; /* need to terminate */

        if (ret != NO_ERROR) {
            /* any other error is only possible if
             * vqueue is corrupted.
             */
            panic("Unable (%d) to return buffer to vqueue\n", ret);
        }
    }

    TRACEF("exit: ret=%d\n", ret);

    return 0;
}

struct data_cb_ctx {
    struct handle* chan;
    struct ipc_msg_info msg_inf;
};

static int tx_data_cb(uint8_t* buf, size_t buf_len, void* ctx) {
    int rc;
    struct data_cb_ctx* cb_ctx = (struct data_cb_ctx*)ctx;

    DEBUG_ASSERT(buf);
    DEBUG_ASSERT(cb_ctx);

    struct iovec_kern dst_iov = {buf, buf_len};
    struct ipc_msg_kern dst_kern_msg = {
            .iov = &dst_iov,
            .num_iov = 1,
            .num_handles = 0,
            .handles = NULL,
    };

    /* read data */
    rc = ipc_read_msg(cb_ctx->chan, cb_ctx->msg_inf.id, 0, &dst_kern_msg);

    /* retire msg */
    ipc_put_msg(cb_ctx->chan, cb_ctx->msg_inf.id);
    return rc;
}

static void handle_tx_msg(struct tipc_dev* dev, struct handle* chan) {
    int ret;
    uint32_t local = 0;
    uint32_t remote = 0;
    struct tipc_ept* ept;
    struct data_cb_ctx cb_ctx = {.chan = chan};

    mutex_acquire(&dev->ept_lock);
    ept = handle_get_cookie(chan);
    if (!ept) {
        mutex_release(&dev->ept_lock);
        return;
    }
    remote = ept->remote;
    mutex_release(&dev->ept_lock);

    /* for all available messages */
    for (;;) {
        /* get next message info */
        ret = ipc_get_msg(chan, &cb_ctx.msg_inf);

        if (ret == ERR_NO_MSG)
            break; /* no new messages */

        if (ret != NO_ERROR) {
            /* should never happen */
            panic("%s: failed (%d) to get message\n", __func__, ret);
        }

        uint16_t ttl_size = cb_ctx.msg_inf.len;

        LTRACEF("forward message (%d bytes)\n", ttl_size);

        /* send message using data callback */
        ret = tipc_send_data(dev, local, remote, tx_data_cb, &cb_ctx, ttl_size,
                             true);
        if (ret != NO_ERROR) {
            /* nothing we can do about it: log it */
            TRACEF("tipc_send_data failed (%d)\n", ret);
        }
    }
}

static void handle_hup(struct tipc_dev* dev, struct handle* chan) {
    uint32_t local = 0;
    uint32_t remote = 0;
    struct tipc_ept* ept;
    bool send_disc = false;

    mutex_acquire(&dev->ept_lock);
    ept = handle_get_cookie(chan);
    if (ept) {
        /* get remote address */
        remote = ept->remote;
        local = ept_to_addr(dev, ept);
        send_disc = true;

        /* remove handle from handle list */
        handle_list_del(&dev->handle_list, chan);

        /* kill cookie */
        handle_set_cookie(chan, NULL);

        /* close it */
        handle_decref(chan);

        /* free local address */
        free_local_addr(dev, local);
    }
    mutex_release(&dev->ept_lock);

    if (send_disc) {
        /* send disconnect request */
        (void)send_disc_req(dev, local, remote);
    }
}

static void handle_ready(struct tipc_dev* dev, struct handle* chan) {
    uint32_t local = 0;
    uint32_t remote = 0;
    struct tipc_ept* ept;
    bool send_rsp = false;

    mutex_acquire(&dev->ept_lock);
    ept = handle_get_cookie(chan);
    if (ept) {
        /* get remote address */
        remote = ept->remote;
        local = ept_to_addr(dev, ept);
        send_rsp = true;
    }
    mutex_release(&dev->ept_lock);

    if (send_rsp) {
        /* send connection response */
        (void)send_conn_rsp(dev, local, remote, 0, IPC_CHAN_MAX_BUF_SIZE, 1);
    }
}

static void handle_tx(struct tipc_dev* dev) {
    int ret;
    struct handle* chan;
    uint32_t chan_event;

    DEBUG_ASSERT(dev);

    for (;;) {
        /* wait for incoming messages */
        ret = handle_list_wait(&dev->handle_list, &chan, &chan_event,
                               INFINITE_TIME);

        if (ret == ERR_NOT_FOUND) {
            /* no handles left */
            return;
        }

        if (ret < 0) {
            /* only possible if somebody else is waiting
               on the same handle, which should never happen */
            panic("%s: couldn't wait for handle events (%d)\n", __func__, ret);
        }

        DEBUG_ASSERT(chan);
        DEBUG_ASSERT(ipc_is_channel(chan));

        if (chan_event & IPC_HANDLE_POLL_READY) {
            handle_ready(dev, chan);
        } else if (chan_event & IPC_HANDLE_POLL_MSG) {
            handle_tx_msg(dev, chan);
        } else if (chan_event & IPC_HANDLE_POLL_HUP) {
            handle_hup(dev, chan);
        } else {
            TRACEF("Unhandled event %x\n", chan_event);
        }
        handle_decref(chan);
    }
}

static int tipc_tx_thread_func(void* arg) {
    struct tipc_dev* dev = arg;

    LTRACEF("enter\n");
    while (!dev->tx_stop) {
        LTRACEF("waiting for handles\n");

        /* wait forever until we have handles */
        event_wait(&dev->have_handles);

        LTRACEF("have handles\n");

        /* handle messages */
        handle_tx(dev);

        LTRACEF("no handles\n");
    }

    TRACEF("exit\n");
    return 0;
}

static status_t tipc_dev_reset(struct tipc_dev* dev) {
    status_t rc;
    struct tipc_ept* ept;

    TRACEF("tipc_dev_reset: devid=%d state=%d\n", dev->vd.devid, dev->vd.state);

    if (dev->vd.state == VDEV_STATE_RESET)
        return NO_ERROR;

    /* Shut down rx thread to block all incoming requests */
    dev->rx_stop = true;
    vqueue_signal_avail(&dev->vqs[TIPC_VQ_RX]);
    rc = thread_join(dev->rx_thread, NULL, 1000);
    LTRACEF("rx thread join: returned %d\n", rc);
    if (rc != NO_ERROR) {
        panic("unable to shutdown rx thread: %d\n", rc);
    }
    dev->rx_thread = NULL;
    dev->rx_stop = false;

    /* Tell tx thread to stop */
    dev->tx_stop = true;

    /* close all channels */
    mutex_acquire(&dev->ept_lock);
    ept = dev->epts;
    for (uint slot = 0; slot < countof(dev->epts); slot++, ept++) {
        if (!bitmap_test(dev->inuse, slot))
            continue;

        if (!ept->chan)
            continue;

        handle_list_del(&dev->handle_list, ept->chan);
        handle_set_cookie(ept->chan, NULL);
        handle_decref(ept->chan);
        free_local_addr(dev, ept_to_addr(dev, ept));
    }
    mutex_release(&dev->ept_lock);

    /* kick tx thread and tx vq */
    event_signal(&dev->have_handles, false);
    vqueue_signal_avail(&dev->vqs[TIPC_VQ_TX]);

    /* wait for it to terminate */
    rc = thread_join(dev->tx_thread, NULL, 1000);
    LTRACEF("tx thread join: returned %d\n", rc);
    if (rc != NO_ERROR) {
        panic("unable to shutdown tx thread: %d\n", rc);
    }
    dev->tx_thread = NULL;
    dev->tx_stop = false;

    /* destroy vqs */
    vqueue_destroy(&dev->vqs[TIPC_VQ_RX]);
    vqueue_destroy(&dev->vqs[TIPC_VQ_TX]);

    /* enter reset state */
    dev->vd.state = VDEV_STATE_RESET;

    return NO_ERROR;
}

static status_t tipc_vdev_reset(struct vdev* vd) {
    DEBUG_ASSERT(vd);

    struct tipc_dev* dev = vdev_to_dev(vd);
    return tipc_dev_reset(dev);
}

static size_t tipc_descr_size(struct vdev* vd) {
    struct tipc_dev* dev = vdev_to_dev(vd);
    return dev->descr_size;
}

static ssize_t tipc_get_vdev_descr(struct vdev* vd, void* descr) {
    struct tipc_dev* dev = vdev_to_dev(vd);
    struct tipc_vdev_descr* vdev_descr = descr;

    /* copy descriptor out of template */
    memcpy(vdev_descr, dev->descr_ptr, dev->descr_size);

    /* patch notifyid */
    vdev_descr->vdev.notifyid = vd->devid;

    return dev->descr_size;
}

static status_t validate_descr(struct tipc_dev* dev,
                               struct tipc_vdev_descr* vdev_descr) {
    if (vdev_descr->hdr.type != RSC_VDEV) {
        TRACEF("unexpected type %d\n", vdev_descr->hdr.type);
        return ERR_INVALID_ARGS;
    }

    if (vdev_descr->vdev.id != VIRTIO_ID_TIPC) {
        TRACEF("unexpected vdev id %d\n", vdev_descr->vdev.id);
        return ERR_INVALID_ARGS;
    }

    if (vdev_descr->vdev.num_of_vrings != TIPC_VQ_NUM) {
        TRACEF("unexpected number of vrings (%d vs. %d)\n",
               vdev_descr->vdev.num_of_vrings, TIPC_VQ_NUM);
        return ERR_INVALID_ARGS;
    }

    /* check if NS driver successfully initialized */
    if (vdev_descr->vdev.status !=
        (VIRTIO_CONFIG_S_ACKNOWLEDGE | VIRTIO_CONFIG_S_DRIVER |
         VIRTIO_CONFIG_S_DRIVER_OK)) {
        TRACEF("unexpected status %d\n", (int)vdev_descr->vdev.status);
        return ERR_INVALID_ARGS;
    }

    return NO_ERROR;
}

/*
 * Should only be called once.
 */
static status_t tipc_dev_probe(struct tipc_dev* dev,
                               struct tipc_vdev_descr* dscr) {
    status_t ret;
    uint vring_cnt;
    char tname[32];

    LTRACEF("%p: descr = %p\n", dev, dscr);

    if (dev->vd.state != VDEV_STATE_RESET)
        return ERR_BAD_STATE;

    ret = validate_descr(dev, dscr);
    if (ret != NO_ERROR)
        return ret;

    /* vring[0] == TX queue (host's RX) */
    /* vring[1] == RX queue (host's TX) */
    for (vring_cnt = 0; vring_cnt < dscr->vdev.num_of_vrings; vring_cnt++) {
        struct fw_rsc_vdev_vring* vring = &dscr->vrings[vring_cnt];

        /*
         * We store the top 32 bits of the vring's 64-bit shared memory id
         * in the 'reserved' field of the vring descriptor structure.
         */
        ext_mem_obj_id_t smem_id =
                ((uint64_t)vring->reserved << 32) | vring->da;

        LTRACEF("vring%d: mem-id 0x%" PRIx64 " align %u num %u nid %u\n",
                vring_cnt, smem_id, vring->align, vring->num, vring->notifyid);

        ret = vqueue_init(&dev->vqs[vring_cnt], vring->notifyid,
                          dev->vd.client_id, smem_id, vring->num, vring->align,
                          dev, notify_cbs[vring_cnt], NULL);
        if (ret)
            goto err_vq_init;
    }

    /* create rx thread */
    snprintf(tname, sizeof(tname), "tipc-dev%d-rx", dev->vd.devid);
    dev->rx_thread = thread_create(tname, tipc_rx_thread_func, dev,
                                   DEFAULT_PRIORITY, DEFAULT_STACK_SIZE);
    if (!dev->rx_thread) {
        ret = ERR_NO_MEMORY;
        goto err_create_rx_thread;
    }

    /* create tx thread */
    snprintf(tname, sizeof(tname), "tipc-dev%d-tx", dev->vd.devid);
    dev->tx_thread = thread_create(tname, tipc_tx_thread_func, dev,
                                   DEFAULT_PRIORITY, DEFAULT_STACK_SIZE);
    if (!dev->tx_thread) {
        ret = ERR_NO_MEMORY;
        goto err_create_tx_thread;
    }

    thread_resume(dev->rx_thread);
    thread_resume(dev->tx_thread);

    ret = _go_online(dev);
    if (ret == NO_ERROR) {
        dev->vd.state = VDEV_STATE_ACTIVE;
    }

    return ret;

err_create_tx_thread:
    /* TODO: free rx thread */
err_create_rx_thread:
err_vq_init:
    /* destroy the vqueues that were successfully initialized */
    while (vring_cnt > 0) {
        vring_cnt--;
        vqueue_destroy(&dev->vqs[vring_cnt]);
    }
    TRACEF("failed, ret = %d\n", ret);
    return ret;
}

static status_t tipc_vdev_probe(struct vdev* vd, void* descr) {
    DEBUG_ASSERT(vd);
    DEBUG_ASSERT(descr);

    struct tipc_dev* dev = vdev_to_dev(vd);
    return tipc_dev_probe(dev, descr);
}

static status_t tipc_vdev_kick_vq(struct vdev* vd, uint vqid) {
    DEBUG_ASSERT(vd);
    struct tipc_dev* dev = vdev_to_dev(vd);

    LTRACEF("devid = %d: vq=%u\n", vd->devid, vqid);

    /* check TX VQ */
    if (vqid == vqueue_id(&dev->vqs[TIPC_VQ_TX])) {
        return vqueue_notify(&dev->vqs[TIPC_VQ_TX]);
    }

    /* check RX VQ */
    if (vqid == vqueue_id(&dev->vqs[TIPC_VQ_RX])) {
        return vqueue_notify(&dev->vqs[TIPC_VQ_RX]);
    }

    return ERR_NOT_FOUND;
}

static int tipc_send_data(struct tipc_dev* dev,
                          uint32_t local,
                          uint32_t remote,
                          tipc_data_cb_t cb,
                          void* cb_ctx,
                          uint16_t data_len,
                          bool wait) {
    ext_mem_obj_id_t out_shared_mem_id[MAX_TX_IOVS];
    struct iovec_kern out_iovs[MAX_TX_IOVS];
    struct vqueue* vq = &dev->vqs[TIPC_VQ_TX];
    struct vqueue_buf buf;
    int ret = 0;

    DEBUG_ASSERT(dev);

    /* check if data callback specified */
    if (!cb)
        return ERR_INVALID_ARGS;

    size_t ttl_len = sizeof(struct tipc_hdr) + data_len;

    memset(&buf, 0, sizeof(buf));
    buf.out_iovs.cnt = MAX_TX_IOVS;
    buf.out_iovs.shared_mem_id = out_shared_mem_id;
    buf.out_iovs.iovs = out_iovs;

    /* get buffer or wait if needed */
    do {
        /* get buffer */
        ret = vqueue_get_avail_buf(vq, &buf);
        if (ret == NO_ERROR) {
            /* got it */
            break;
        }

        if (ret != ERR_NOT_ENOUGH_BUFFER || !wait) {
            /* no buffers and no wait */
            goto err;
        }

        /* wait for buffers */
        event_wait(&vq->avail_event);
        if (dev->tx_stop) {
            return ERR_CHANNEL_CLOSED;
        }
    } while (true);

    /* we only support and expect a single out_iovec for now */
    if (buf.out_iovs.used == 0) {
        TRACEF("unexpected iovec cnt in = %d out = %d\n", buf.in_iovs.used,
               buf.out_iovs.used);
        ret = ERR_NOT_ENOUGH_BUFFER;
        goto done;
    }

    if (buf.out_iovs.used != 1 || buf.in_iovs.used != 0) {
        LTRACEF("unexpected iovec cnt in = %d out = %d\n", buf.in_iovs.used,
                buf.out_iovs.used);
    }

    /* the first iovec should be large enough to hold the header */
    if (sizeof(struct tipc_hdr) > buf.out_iovs.iovs[0].iov_len) {
        /* not enough space to even place the header */
        TRACEF("buf is too small (%zu < %zu)\n", buf.out_iovs.iovs[0].iov_len,
               ttl_len);
        ret = ERR_NOT_ENOUGH_BUFFER;
        goto done;
    }

    /* map in provided buffers (no-execute, read-write) */
    uint map_flags = ARCH_MMU_FLAG_PERM_NO_EXECUTE;
    ret = vqueue_map_iovs(dev->vd.client_id, &buf.out_iovs, map_flags,
                          &dev->send_mapped);
    if (ret == NO_ERROR) {
        struct tipc_hdr* hdr = buf.out_iovs.iovs[0].iov_base;

        hdr->src = local;
        hdr->dst = remote;
        hdr->reserved = 0;
        /* outbound messages never carry shm objects */
        hdr->shm_cnt = 0;
        hdr->len = data_len;
        hdr->flags = 0;

        if (ttl_len > buf.out_iovs.iovs[0].iov_len) {
            /* not enough space to put the whole message,
               so it will be truncated */
            TRACEF("buf is too small (%zu < %zu)\n",
                   buf.out_iovs.iovs[0].iov_len, ttl_len);
            data_len = buf.out_iovs.iovs[0].iov_len - sizeof(struct tipc_hdr);
        }

        /* invoke data_cb to add actual data */
        ret = cb(hdr->data, data_len, cb_ctx);
        if (ret >= 0) {
            /* add header */
            ret += sizeof(struct tipc_hdr);
        }

        if (!dev->reuse_mapping)
            vqueue_unmap_iovs(&buf.out_iovs, &dev->send_mapped);
    }

done:
    ret = vqueue_add_buf(vq, &buf, (uint32_t)ret);
err:
    return ret;
}

struct buf_ctx {
    uint8_t* data;
    size_t len;
};

static int _send_buf(uint8_t* dst, size_t sz, void* ctx) {
    struct buf_ctx* buf = (struct buf_ctx*)ctx;

    DEBUG_ASSERT(dst);
    DEBUG_ASSERT(buf);
    DEBUG_ASSERT(buf->data);
    DEBUG_ASSERT(sz <= buf->len);

    memcpy(dst, buf->data, sz);

    return (int)sz;
}

static int tipc_send_buf(struct tipc_dev* dev,
                         uint32_t local,
                         uint32_t remote,
                         void* data,
                         uint16_t data_len,
                         bool wait) {
    struct buf_ctx ctx = {data, data_len};

    return tipc_send_data(dev, local, remote, _send_buf, &ctx, data_len, wait);
}

static const struct vdev_ops _tipc_dev_ops = {
        .descr_sz = tipc_descr_size,
        .get_descr = tipc_get_vdev_descr,
        .probe = tipc_vdev_probe,
        .reset = tipc_vdev_reset,
        .kick_vqueue = tipc_vdev_kick_vq,
};
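
/*
 * Typical usage (a sketch; the concrete tipc_vdev_descr layout comes
 * from lib/trusty/tipc_virtio_dev.h and is normally built from a
 * descriptor template declared elsewhere):
 *
 *   static const struct tipc_vdev_descr descr = ...;
 *   struct tipc_dev* dev;
 *   status_t rc = create_tipc_device(vb, &descr, sizeof(descr),
 *                                    &some_uuid, &dev);
 *
 * The virtio bus then drives the device through _tipc_dev_ops:
 * get_descr() publishes the notifyid-patched descriptor to NS, probe()
 * spins up the rx/tx threads once the NS driver reports DRIVER_OK, and
 * reset()/kick_vqueue() map to the tipc_vdev_* entry points above.
 */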

status_t create_tipc_device(struct trusty_virtio_bus* vb,
                            const struct tipc_vdev_descr* descr,
                            size_t size,
                            const uuid_t* uuid,
                            struct tipc_dev** dev_ptr) {
    status_t ret;
    struct tipc_dev* dev;

    DEBUG_ASSERT(uuid);
    DEBUG_ASSERT(descr);
    DEBUG_ASSERT(size);

    dev = calloc(1, sizeof(*dev));
    if (!dev)
        return ERR_NO_MEMORY;

    mutex_init(&dev->ept_lock);
    dev->vd.ops = &_tipc_dev_ops;
    dev->uuid = uuid;
    dev->descr_ptr = descr;
    dev->descr_size = size;
    handle_list_init(&dev->handle_list);
    event_init(&dev->have_handles, false, EVENT_FLAG_AUTOUNSIGNAL);

    bst_root_initialize(&dev->send_mapped.list);
    mutex_init(&dev->send_mapped.lock);
    dev->send_mapped.in_direction = false;
    bst_root_initialize(&dev->receive_mapped.list);
    mutex_init(&dev->receive_mapped.lock);
    dev->receive_mapped.in_direction = true;
    dev->reuse_mapping = false;

    ret = virtio_register_device(vb, &dev->vd);

    if (ret != NO_ERROR)
        goto err_register;

    if (dev_ptr)
        *dev_ptr = dev;

    return NO_ERROR;

err_register:
    free(dev);
    return ret;
}

status_t release_shm(struct tipc_dev* dev, uint64_t shm_id) {
    struct {
        struct tipc_ctrl_msg_hdr hdr;
        struct tipc_release_body body;
    } msg;

    msg.hdr.type = TIPC_CTRL_MSGTYPE_RELEASE;
    msg.hdr.body_len = sizeof(struct tipc_release_body);

    msg.body.id = shm_id;

    LTRACEF("release shm %" PRIu64 "\n", shm_id);

    return tipc_send_buf(dev, TIPC_CTRL_ADDR, TIPC_CTRL_ADDR, &msg, sizeof(msg),
                         true);
}