/*
 * Copyright (c) 2016, Google, Inc. All rights reserved
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files
 * (the "Software"), to deal in the Software without restriction,
 * including without limitation the rights to use, copy, modify, merge,
 * publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so,
 * subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
 * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include "tipc_dev_ql.h"

#include <assert.h>
#include <compiler.h>
#include <err.h>
#include <inttypes.h>
#include <kernel/mutex.h>
#include <kernel/vm.h>
#include <lib/sm/sm_err.h>
#include <lib/trusty/handle.h>
#include <lib/trusty/handle_set.h>
#include <lib/trusty/ipc.h>
#include <lib/trusty/ipc_msg.h>
#include <list.h>
#include <stddef.h>
#include <stdlib.h>
#include <string.h>
#include <trace.h>

#define LOCAL_TRACE 0
/*
 * Max number of sync tipc devices
 */
#define QL_TIPC_DEV_MAX_NUM 2

/*
 * Max number of open channels supported
 */
#define QL_TIPC_ADDR_MAX_NUM 32

/*
 * Local addresses base
 */
#define QL_TIPC_ADDR_BASE 32

/*
 * Maximum service name size
 */
#define TIPC_MAX_SRV_NAME_LEN (256)

struct tipc_ept {
    struct handle* chan;
    struct handle_ref* ref;
    uint64_t cookie;
};

struct ql_tipc_dev {
    struct list_node node;
    struct handle* handle_set;
    bool in_use; /* protected by @_dev_list_lock */

    uint ns_mmu_flags;
    ns_size_t ns_sz;
    ext_mem_client_id_t client_id;
    ext_mem_obj_id_t buf_id;
    void* ns_va;
    const uuid_t* uuid;

    unsigned long inuse[BITMAP_NUM_WORDS(QL_TIPC_ADDR_MAX_NUM)];
    struct tipc_ept epts[QL_TIPC_ADDR_MAX_NUM];
};

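/*
 * Command header exchanged through the shared buffer. Every request and
 * response begins with one of these; the payload, if any, immediately
 * follows it. Responses are written over the request in place:
 * set_status() below ORs QL_TIPC_DEV_RESP into @opcode and fills in
 * @status and @payload_len.
 */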
struct tipc_cmd_hdr {
    uint16_t opcode;
    uint16_t flags;
    uint32_t status;
    uint32_t handle;
    uint32_t payload_len;
};

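/*
 * Event record returned by QL_TIPC_DEV_GET_EVENT, placed directly after
 * the command header in the shared buffer. An all-zero record (@handle,
 * @event and @cookie all 0) means no event was pending.
 */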
struct tipc_event {
    uint32_t event;
    uint32_t handle;
    uint64_t cookie;
};

struct tipc_wait_req {
    uint64_t reserved;
};

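/*
 * Connect request payload. The service name follows the fixed part and
 * does not need to be NUL-terminated; dev_connect() terminates its local
 * copy. A sketch of what the non-secure client would place in the buffer
 * (layout inferred from the parsing in dev_connect(), not a normative
 * definition):
 *
 *   struct tipc_cmd_hdr hdr = {
 *       .opcode = QL_TIPC_DEV_CONNECT,
 *       .payload_len = sizeof(struct tipc_connect_req) + strlen(name),
 *   };
 *   ...header, then cookie/reserved, then the name bytes...
 */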
struct tipc_connect_req {
    uint64_t cookie;
    uint64_t reserved;
    uint8_t name[0];
};

#ifdef SPIN_LOCK_FLAG_IRQ_FIQ
#define SLOCK_FLAGS SPIN_LOCK_FLAG_IRQ_FIQ
#else
#define SLOCK_FLAGS SPIN_LOCK_FLAG_INTERRUPTS
#endif

static uint _dev_cnt;
/*
 * @_dev_list is only modified from stdcalls with @_dev_list_lock held. It can
 * be read from any context with @_dev_list_lock held and from stdcalls without
 * the @_dev_list_lock held.
 */
static struct list_node _dev_list = LIST_INITIAL_VALUE(_dev_list);
/* @_dev_list_lock protects @_dev_list and @struct ql_tipc_dev->in_use */
static spin_lock_t _dev_list_lock = SPIN_LOCK_INITIAL_VALUE;

static inline uint addr_to_slot(uint32_t addr) {
    return (uint)(addr - QL_TIPC_ADDR_BASE);
}

static inline uint32_t slot_to_addr(uint slot) {
    return (uint32_t)(slot + QL_TIPC_ADDR_BASE);
}

static uint32_t alloc_local_addr(struct ql_tipc_dev* dev,
                                 struct handle* chan,
                                 uint64_t cookie) {
    int slot = bitmap_ffz(dev->inuse, QL_TIPC_ADDR_MAX_NUM);
    if (slot >= 0) {
        bitmap_set(dev->inuse, slot);
        dev->epts[slot].chan = chan;
        dev->epts[slot].cookie = cookie;
        return slot_to_addr(slot);
    }
    return 0;
}

static struct tipc_ept* ept_lookup(struct ql_tipc_dev* dev, uint32_t local) {
    uint slot = addr_to_slot(local);
    if (slot < QL_TIPC_ADDR_MAX_NUM) {
        if (bitmap_test(dev->inuse, slot)) {
            return &dev->epts[slot];
        }
    }
    return NULL;
}

static uint32_t ept_to_addr(struct ql_tipc_dev* dev, struct tipc_ept* ept) {
    return slot_to_addr(ept - dev->epts);
}

static void free_local_addr(struct ql_tipc_dev* dev, uint32_t local) {
    uint slot = addr_to_slot(local);

    if (slot < QL_TIPC_ADDR_MAX_NUM) {
        bitmap_clear(dev->inuse, slot);
        dev->epts[slot].chan = NULL;
        dev->epts[slot].cookie = 0;
    }
}

static struct ql_tipc_dev* dev_lookup(ext_mem_client_id_t client_id,
                                      ext_mem_obj_id_t buf_id) {
    struct ql_tipc_dev* dev;

    list_for_every_entry(&_dev_list, dev, struct ql_tipc_dev, node) {
        if (dev->client_id == client_id && dev->buf_id == buf_id) {
            return dev;
        }
    }
    return NULL;
}

static struct ql_tipc_dev* dev_acquire(ext_mem_client_id_t client_id,
                                       ext_mem_obj_id_t buf_id) {
    struct ql_tipc_dev* dev;
    spin_lock_saved_state_t state;

    spin_lock_save(&_dev_list_lock, &state, SLOCK_FLAGS);
    dev = dev_lookup(client_id, buf_id);
    if (dev) {
        if (dev->in_use) {
            TRACEF("0x%" PRIx64 ": device in use by another cpu\n", buf_id);
            dev = NULL;
        } else {
            dev->in_use = true;
        }
    }
    spin_unlock_restore(&_dev_list_lock, state, SLOCK_FLAGS);

    return dev;
}

static void dev_release(struct ql_tipc_dev* dev) {
    spin_lock_saved_state_t state;

    spin_lock_save(&_dev_list_lock, &state, SLOCK_FLAGS);
    DEBUG_ASSERT(dev->in_use);
    dev->in_use = false;
    spin_unlock_restore(&_dev_list_lock, state, SLOCK_FLAGS);
}

static long dev_create(ext_mem_client_id_t client_id,
                       ext_mem_obj_id_t buf_id,
                       ns_size_t buf_sz,
                       uint buf_mmu_flags) {
    status_t res;
    struct ql_tipc_dev* dev;
    spin_lock_saved_state_t state;

    dev = dev_lookup(client_id, buf_id);
    if (dev) {
        LTRACEF("0x%" PRIx64 ": device already exists\n", buf_id);
        return SM_ERR_INVALID_PARAMETERS;
    }

    if (!buf_sz) {
        LTRACEF("zero size shared buffer specified\n");
        return SM_ERR_INVALID_PARAMETERS;
    }

    if (buf_sz & (PAGE_SIZE - 1)) {
        LTRACEF("shared buffer size is not page aligned: 0x%x\n", buf_sz);
        return SM_ERR_INVALID_PARAMETERS;
    }

    if (_dev_cnt >= QL_TIPC_DEV_MAX_NUM) {
        LTRACEF("max number of devices reached: %d\n", _dev_cnt);
        return SM_ERR_NOT_ALLOWED;
    }

    dev = calloc(1, sizeof(*dev));
    if (!dev) {
        LTRACEF("out of memory creating sync tipc dev\n");
        return SM_ERR_INTERNAL_FAILURE;
    }

    dev->uuid = &zero_uuid;

    list_clear_node(&dev->node);
    dev->handle_set = handle_set_create();
    if (!dev->handle_set) {
        LTRACEF("out of memory creating handle_set\n");
        free(dev);
        return SM_ERR_INTERNAL_FAILURE;
    }

    /* map shared buffer into address space */
    dev->client_id = client_id;
    dev->buf_id = buf_id;
    dev->ns_sz = buf_sz;
    dev->ns_mmu_flags = buf_mmu_flags;
    res = ext_mem_map_obj_id(vmm_get_kernel_aspace(), "tipc", client_id, buf_id,
                             0, 0, round_up(buf_sz, PAGE_SIZE), &dev->ns_va,
                             PAGE_SIZE_SHIFT, 0, buf_mmu_flags);
    if (res != NO_ERROR) {
        LTRACEF("failed (%d) to map shared buffer\n", res);
        /* release the handle set before freeing the device */
        handle_close(dev->handle_set);
        free(dev);
        return SM_ERR_INTERNAL_FAILURE;
    }

    spin_lock_save(&_dev_list_lock, &state, SLOCK_FLAGS);
    list_add_head(&_dev_list, &dev->node);
    spin_unlock_restore(&_dev_list_lock, state, SLOCK_FLAGS);
    _dev_cnt++;

    LTRACEF("tipc dev: %u bytes @ 0x%" PRIx64 ":0x%" PRIx64
            " (%p) (flags=0x%x)\n",
            dev->ns_sz, dev->client_id, dev->buf_id, dev->ns_va,
            dev->ns_mmu_flags);

    return 0;
}

static void dev_shutdown(struct ql_tipc_dev* dev) {
    spin_lock_saved_state_t state;

    DEBUG_ASSERT(dev);
    DEBUG_ASSERT(dev->ns_va);
    DEBUG_ASSERT(dev->in_use);

    /* remove from list */
    spin_lock_save(&_dev_list_lock, &state, SLOCK_FLAGS);
    list_delete(&dev->node);
    spin_unlock_restore(&_dev_list_lock, state, SLOCK_FLAGS);
    _dev_cnt--;

    /* unmap shared region */
    vmm_free_region(vmm_get_kernel_aspace(), (vaddr_t)dev->ns_va);
    dev->ns_va = NULL;

    /* close all channels */
    for (uint slot = 0; slot < countof(dev->epts); slot++) {
        struct tipc_ept* ept = &dev->epts[slot];

        if (!bitmap_test(dev->inuse, slot))
            continue;

        if (!ept->chan)
            continue;

        handle_set_detach_ref(ept->ref);
        handle_decref(ept->chan);
        free(ept->ref);
        ept->ref = NULL;
        handle_set_cookie(ept->chan, NULL);
        handle_close(ept->chan);
    }

    /* release the handle set itself */
    handle_close(dev->handle_set);

    free(dev);
}

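/*
 * Write the response header back into the shared buffer and return @err
 * so callers can propagate it as their own return value. For example,
 * after a successful QL_TIPC_DEV_RECV the client observes:
 *
 *   hdr.opcode      == QL_TIPC_DEV_RECV | QL_TIPC_DEV_RESP
 *   hdr.status      == 0
 *   hdr.payload_len == number of message bytes following the header
 *
 * On failure @status is 1 and the negative error code travels back only
 * through the return value.
 */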
static long set_status(struct ql_tipc_dev* dev, int cmd, int err, size_t len) {
    struct tipc_cmd_hdr* ns_hdr = dev->ns_va;

    ns_hdr->status = (err < 0) ? 1 : 0;
    ns_hdr->payload_len = len;
    ns_hdr->opcode = cmd | QL_TIPC_DEV_RESP;

    smp_wmb();
    return err;
}

static int dev_connect(struct ql_tipc_dev* dev,
                       void* ns_payload,
                       size_t ns_payload_len) {
    int rc;
    uint32_t local = 0;
    struct handle* chan = NULL;
    int opcode = QL_TIPC_DEV_CONNECT;
    struct handle_ref* ref;
    struct tipc_cmd_hdr* ns_hdr = dev->ns_va;
    struct {
        struct tipc_connect_req hdr;
        uint8_t body[TIPC_MAX_SRV_NAME_LEN + 1];
    } req;

    if (ns_payload_len <= sizeof(req.hdr))
        return set_status(dev, opcode, ERR_INVALID_ARGS, 0);

    if (ns_payload_len - sizeof(req.hdr) >= sizeof(req.body))
        return set_status(dev, opcode, ERR_INVALID_ARGS, 0);

    /* copy out and zero terminate */
    memcpy(&req, ns_payload, ns_payload_len);
    req.body[ns_payload_len - sizeof(req.hdr)] = 0;

    /* open ipc channel */
    rc = ipc_port_connect_async(dev->uuid, (const char*)req.body,
                                ns_payload_len - sizeof(req.hdr), 0, &chan);
    if (rc != NO_ERROR) {
        TRACEF("failed to open ipc channel: %d\n", rc);
        return set_status(dev, opcode, rc, 0);
    }

    /* allocate slot */
    local = alloc_local_addr(dev, chan, req.hdr.cookie);
    if (local == 0) {
        TRACEF("failed to alloc local address\n");
        handle_close(chan);
        chan = NULL;
        return set_status(dev, opcode, ERR_NO_RESOURCES, 0);
    }

    LTRACEF("new handle: 0x%x\n", local);
    handle_set_cookie(chan, ept_lookup(dev, local));

    ref = calloc(1, sizeof(*ref));
    if (!ref) {
        rc = ERR_NO_MEMORY;
        goto err_alloc_ref;
    }

    handle_incref(chan);
    ref->handle = chan;
    ref->emask = ~0U;
    ref->cookie = ept_lookup(dev, local);
    ref->id = local;

    rc = handle_set_attach(dev->handle_set, ref);
    if (rc) {
        goto err_handle_set_attach;
    }
    ept_lookup(dev, local)->ref = ref;

    ns_hdr->handle = local;

    return set_status(dev, opcode, 0, 0);

err_handle_set_attach:
    handle_decref(chan);
    free(ref);
err_alloc_ref:
    free_local_addr(dev, local);
    handle_close(chan);
    chan = NULL;
    return set_status(dev, opcode, rc, 0);
}

static long dev_disconnect(struct ql_tipc_dev* dev, uint32_t target) {
    struct tipc_ept* ept;
    int opcode = QL_TIPC_DEV_DISCONNECT;

    ept = ept_lookup(dev, target);
    if (!ept || !ept->chan)
        return SM_ERR_INVALID_PARAMETERS;

    handle_set_detach_ref(ept->ref);
    handle_decref(ept->chan);
    free(ept->ref);
    ept->ref = NULL;
    handle_set_cookie(ept->chan, NULL);
    handle_close(ept->chan);
    free_local_addr(dev, target);

    return set_status(dev, opcode, 0, 0);
}

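/*
 * QL_TIPC_DEV_SEND: forward @ns_sz payload bytes from the shared buffer
 * to the channel identified by @target.
 */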
static long dev_send(struct ql_tipc_dev* dev,
                     void* ns_data,
                     size_t ns_sz,
                     uint32_t target) {
    int opcode = QL_TIPC_DEV_SEND;
    struct tipc_ept* ept = ept_lookup(dev, target);
    if (!ept || !ept->chan)
        return set_status(dev, opcode, ERR_INVALID_ARGS, 0);

    struct ipc_msg_kern msg = {
            .iov =
                    (struct iovec_kern[]){
                            [0] = {.iov_base = ns_data, .iov_len = ns_sz},
                    },
            .num_iov = 1,
            .num_handles = 0};

    return set_status(dev, opcode, ipc_send_msg(ept->chan, &msg), 0);
}

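/*
 * QL_TIPC_DEV_RECV: take the next pending message on @target and copy it
 * into the shared buffer directly after the command header. The message
 * is consumed either way; one that does not fit in the remaining buffer
 * space is reported as ERR_BAD_LEN rather than silently truncated.
 */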
static long dev_recv(struct ql_tipc_dev* dev, uint32_t target) {
    int rc;
    int opcode = QL_TIPC_DEV_RECV;
    struct tipc_ept* ept = ept_lookup(dev, target);
    if (!ept || !ept->chan)
        return set_status(dev, opcode, ERR_INVALID_ARGS, 0);

    struct ipc_msg_info mi;
    rc = ipc_get_msg(ept->chan, &mi);
    if (rc < 0)
        return set_status(dev, opcode, rc, 0);

    struct ipc_msg_kern msg = {
            .iov =
                    (struct iovec_kern[]){
                            [0] = {.iov_base = dev->ns_va +
                                               sizeof(struct tipc_cmd_hdr),
                                   .iov_len = dev->ns_sz -
                                              sizeof(struct tipc_cmd_hdr)},
                    },
            .num_iov = 1,
            .num_handles = 0};

    rc = ipc_read_msg(ept->chan, mi.id, 0, &msg);
    ipc_put_msg(ept->chan, mi.id);

    if (rc < 0)
        return set_status(dev, opcode, rc, 0);
    if (rc < (int)mi.len)
        return set_status(dev, opcode, ERR_BAD_LEN, 0);

    return set_status(dev, opcode, rc, mi.len);
}

static long dev_has_event(struct ql_tipc_dev* dev,
                          void* ns_data,
                          size_t ns_sz,
                          uint32_t target) {
    const int opcode = QL_TIPC_DEV_FC_HAS_EVENT;

    if (ns_sz < (sizeof(struct tipc_cmd_hdr) + sizeof(bool)) ||
        ns_sz > dev->ns_sz) {
        return set_status(dev, opcode, ERR_INVALID_ARGS, 0);
    }

    bool* ready = (bool*)((uint8_t*)dev->ns_va + sizeof(struct tipc_cmd_hdr));
    *ready = handle_set_ready(dev->handle_set);
    return set_status(dev, opcode, 0, sizeof(*ready));
}

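/*
 * QL_TIPC_DEV_GET_EVENT: poll for a pending event without blocking (all
 * waits below use a zero timeout). A non-zero @target polls that channel
 * only; @target == 0 polls the whole handle set, so a single call can
 * pick up an event for any open channel.
 */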
static long dev_get_event(struct ql_tipc_dev* dev,
                          void* ns_data,
                          size_t ns_sz,
                          uint32_t target) {
    int rc;
    struct handle* chan;
    struct tipc_wait_req req;
    uint32_t chan_event = 0;
    struct tipc_ept* ept = NULL;
    const int opcode = QL_TIPC_DEV_GET_EVENT;
    struct tipc_event* evt = (struct tipc_event*)((uint8_t*)dev->ns_va +
                                                  sizeof(struct tipc_cmd_hdr));

    if (ns_sz < sizeof(req) || ns_sz > dev->ns_sz)
        return set_status(dev, opcode, ERR_INVALID_ARGS, 0);

    if (target) {
        /* wait on specific handle */
        ept = ept_lookup(dev, target);
        if (!ept || !ept->chan)
            return set_status(dev, opcode, ERR_INVALID_ARGS, 0);

        chan = ept->chan;
        rc = handle_wait(chan, &chan_event, 0);
        if (rc == ERR_TIMED_OUT) {
            /* no events: return an empty event */
            evt->handle = 0;
            evt->event = 0;
            evt->cookie = 0;
        } else if (rc < 0) {
            /* only possible if something is corrupted or somebody is
             * already waiting on the same handle
             */
            panic("%s: couldn't wait for handle events (%d)\n", __func__, rc);
        } else {
            /* got an event: return it */
            evt->handle = target;
            evt->event = chan_event;
            evt->cookie = ept->cookie;
        }
    } else {
        struct handle_ref hsevt;
        /* wait for event with 0-timeout */
        rc = handle_set_wait(dev->handle_set, &hsevt, 0);
        if (rc == ERR_NOT_FOUND) {
            /* no handles left */
            return set_status(dev, opcode, rc, 0);
        }

        if (rc < 0) {
            if (rc == ERR_TIMED_OUT) {
                /* no events: return an empty event */
                evt->handle = 0;
                evt->event = 0;
                evt->cookie = 0;
            } else {
                /* only possible if somebody else is waiting on the same
                 * handle, which should never happen
                 */
                panic("%s: couldn't wait for handle events (%d)\n", __func__,
                      rc);
            }
        } else {
            /* got an event: return it */
            ept = hsevt.cookie;

            evt->handle = ept_to_addr(dev, ept);
            evt->event = hsevt.emask;
            evt->cookie = ept->cookie;

            /* drop ref obtained by handle_set_wait */
            handle_decref(hsevt.handle);
        }
    }

    return set_status(dev, opcode, 0, sizeof(*evt));
}

static long dev_handle_fc_cmd(struct ql_tipc_dev* dev,
                              const struct tipc_cmd_hdr* cmd,
                              void* ns_payload) {
    DEBUG_ASSERT(dev);
    switch (cmd->opcode) {
    case QL_TIPC_DEV_FC_HAS_EVENT:
        return dev_has_event(dev, ns_payload, cmd->payload_len, cmd->handle);

    default:
        LTRACEF("0x%x: unhandled cmd\n", cmd->opcode);
        return set_status(dev, cmd->opcode, ERR_NOT_SUPPORTED, 0);
    }
}

static long dev_handle_cmd(struct ql_tipc_dev* dev,
                           const struct tipc_cmd_hdr* cmd,
                           void* ns_payload) {
    DEBUG_ASSERT(dev);

    switch (cmd->opcode) {
    case QL_TIPC_DEV_SEND:
        return dev_send(dev, ns_payload, cmd->payload_len, cmd->handle);

    case QL_TIPC_DEV_RECV:
        return dev_recv(dev, cmd->handle);

    case QL_TIPC_DEV_GET_EVENT:
        return dev_get_event(dev, ns_payload, cmd->payload_len, cmd->handle);

    case QL_TIPC_DEV_CONNECT:
        return dev_connect(dev, ns_payload, cmd->payload_len);

    case QL_TIPC_DEV_DISCONNECT:
        return dev_disconnect(dev, cmd->handle);

    default:
        LTRACEF("0x%x: unhandled cmd\n", cmd->opcode);
        return set_status(dev, cmd->opcode, ERR_NOT_SUPPORTED, 0);
    }
}

long ql_tipc_create_device(ext_mem_client_id_t client_id,
                           ext_mem_obj_id_t buf_id,
                           ns_size_t buf_sz,
                           uint buf_mmu_flags) {
    return dev_create(client_id, buf_id, buf_sz, buf_mmu_flags);
}

long ql_tipc_shutdown_device(ext_mem_client_id_t client_id,
                             ext_mem_obj_id_t buf_id) {
    struct ql_tipc_dev* dev = dev_acquire(client_id, buf_id);
    if (!dev) {
        LTRACEF("0x%" PRIx64 ": device not found\n", buf_id);
        return SM_ERR_INVALID_PARAMETERS;
    }
    dev_shutdown(dev);
    return 0;
}

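/*
 * Main dispatcher, called once the non-secure client has placed a
 * command in the shared buffer. A rough sketch of one exchange from the
 * client's side (illustrative only):
 *
 *   1. fill in a struct tipc_cmd_hdr at the start of the buffer and
 *      append the payload;
 *   2. issue the smcall carrying (client_id, buf_id, cmd_sz), which
 *      lands here;
 *   3. read the response header back: @status is 0 on success and, for
 *      QL_TIPC_DEV_CONNECT, @handle holds the new channel address to use
 *      with subsequent SEND/RECV/DISCONNECT commands.
 */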
long ql_tipc_handle_cmd(ext_mem_client_id_t client_id,
                        ext_mem_obj_id_t buf_id,
                        ns_size_t cmd_sz,
                        bool is_fc) {
    long ret = SM_ERR_INVALID_PARAMETERS;
    struct tipc_cmd_hdr cmd_hdr;

    /* lookup device */
    struct ql_tipc_dev* dev = dev_acquire(client_id, buf_id);
    if (!dev) {
        TRACEF("0x%" PRIx64 ": device not found\n", buf_id);
        goto err_not_found;
    }

    /* check for valid size */
    if (cmd_sz < sizeof(cmd_hdr) || cmd_sz > dev->ns_sz) {
        TRACEF("message size invalid (%zu)\n", (size_t)cmd_sz);
        goto err_invalid;
    }

    /* copy out command header */
    memcpy(&cmd_hdr, dev->ns_va, sizeof(cmd_hdr));

    /* check for consistency */
    if (cmd_hdr.payload_len != (cmd_sz - sizeof(cmd_hdr))) {
        TRACEF("malformed command\n");
        goto err_invalid;
    }

    if (is_fc) {
        ret = dev_handle_fc_cmd(dev, &cmd_hdr, dev->ns_va + sizeof(cmd_hdr));
    } else {
        ret = dev_handle_cmd(dev, &cmd_hdr, dev->ns_va + sizeof(cmd_hdr));
    }
err_invalid:
    dev_release(dev);
err_not_found:
    return ret;
}