1 /*
2 * Copyright (C) 2014-2015 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #include <stddef.h>
18 #include <stdio.h>
19 #include <stdlib.h>
20 #include <string.h>
21 #include <trusty/time.h>
22 #include <trusty_ipc.h>
23 #include <uapi/err.h>
24
25 #define TLOG_LVL TLOG_LVL_INFO
26 #define TLOG_TAG "ipc-unittest-main"
27 #include <trusty_unittest.h>
28
29 #include <app/ipc_unittest/common.h>
30 #include <ipc_unittest_uuid_consts.h>
31 #include <lib/tipc/tipc.h>
32 #include <lib/unittest/unittest.h>
33
34 /* base of valid handle range */
35 static handle_t handle_base;
36
37 /* offset from handle_base of the first handle that is not used */
38 static unsigned int first_free_handle_index;
39
40 static const uuid_t srv_app_uuid = IPC_UNITTEST_SRV_APP_UUID;
41 static const uintptr_t COOKIE_BASE = 100;
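
/*
 * Note on the handle-range globals above: the negative tests below assume
 * that valid user handles live in the range
 * [handle_base, handle_base + MAX_USER_HANDLES) and that slots at index
 * first_free_handle_index and above are currently unused (both variables
 * are presumably initialized by the test setup code, which is not part of
 * this section). A minimal sketch of the probe pattern those tests use:
 *
 *   rc = wait(handle_base + MAX_USER_HANDLES, &event, timeout);
 *   EXPECT_EQ(ERR_BAD_HANDLE, rc, "out of range handle");
 *
 *   for (unsigned int i = first_free_handle_index; i < MAX_USER_HANDLES; i++) {
 *       rc = wait(handle_base + i, &event, timeout);
 *       EXPECT_EQ(ERR_NOT_FOUND, rc, "in range, but unused slot");
 *   }
 */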
42
43 #define ABORT_IF(_cond, lbl) \
44 { \
45 if (_cond) { \
46 goto lbl; \
47 } \
48 }
49
50 #define ABORT_IF_NOT_OK(lbl) ABORT_IF((HasFailure()), lbl)
51
52 #define EXPECT_GT_ZERO(val, args...) EXPECT_GT(val, 0, ##args)
53 #define EXPECT_GE_ZERO(val, args...) EXPECT_GE(val, 0, ##args)
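
/*
 * Usage note (illustrative only): the handle-set tests near the end of this
 * file combine ABORT_IF_NOT_OK() with a local label to skip the body of a
 * test once setup has failed, roughly like this:
 *
 *   handle_t hset = handle_set_create();
 *   EXPECT_GE_ZERO((int)hset, "create handle set");
 *   ABORT_IF_NOT_OK(abort_test);   // bail out if creation failed
 *   ...                            // exercise the handle set here
 * abort_test:
 *   close(hset);
 */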
54
55 /*
56 * TODO(b/294212953): Fix the inherent issue and enable tests
57 * With source based code coverage for unit test enabled, the
58 * coverage aggregator and potentially the coverage daemon
59 * have extra handles that upset the calculation of the first
60 * free handle, so disable these tests.
61 */
62 #if UNITTEST_COVERAGE
63 #define DISABLED_WITH_COVERAGE(name) DISABLED_##name
64 #else
65 #define DISABLED_WITH_COVERAGE(name) name
66 #endif
67
68 /****************************************************************************/
69
70 /*
71 * Fill specified buffer with incremental pattern
72 */
static void fill_test_buf(uint8_t* buf, size_t cnt, uint8_t seed) {
74 if (!buf || !cnt)
75 return;
76
77 for (; cnt > 0; cnt--) {
78 *buf++ = seed++;
79 }
80 }
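
/*
 * The receiving side can check the same incremental pattern with a mirror
 * helper. This is only an illustrative sketch (no such helper exists in
 * this file, and the echo/datasink services may validate payloads
 * differently):
 *
 *   static int check_test_buf(const uint8_t* buf, size_t cnt, uint8_t seed) {
 *       for (; cnt > 0; cnt--) {
 *           if (*buf++ != seed++)
 *               return 0;
 *       }
 *       return 1;
 *   }
 */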
81
/*
 * Local wrapper on top of async connect that provides
 * a synchronous connect with a timeout.
 */
int sync_connect(const char* path, unsigned int timeout) {
87 int rc;
88 uevent_t evt;
89 handle_t chan;
90
91 rc = connect(path, IPC_CONNECT_ASYNC | IPC_CONNECT_WAIT_FOR_PORT);
92 if (rc >= 0) {
93 chan = (handle_t)rc;
94 rc = wait(chan, &evt, timeout);
95 if (rc == 0) {
96 rc = ERR_BAD_STATE;
97 if (evt.handle == chan) {
98 if (evt.event & IPC_HANDLE_POLL_READY)
99 return chan;
100
101 if (evt.event & IPC_HANDLE_POLL_HUP)
102 rc = ERR_CHANNEL_CLOSED;
103 }
104 }
105 close(chan);
106 }
107 return rc;
108 }
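
/*
 * Typical use of the wrapper above (a hypothetical caller, not part of the
 * tests): a non-negative return value is the connected channel and any
 * negative value is an error code, so callers branch on the sign of rc.
 *
 *   int rc = sync_connect(SRV_PATH_BASE ".srv.echo", 1000);
 *   if (rc >= 0) {
 *       handle_t chan = (handle_t)rc;
 *       ...   // use the channel
 *       close(chan);
 *   } else {
 *       TLOGI("sync_connect failed (%d)\n", rc);
 *   }
 */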
109
110 /****************************************************************************/
111
112 /*
113 * wait on handle negative test
114 */
TEST(ipc, DISABLED_WITH_COVERAGE(wait_negative)) {
116 int rc;
117 uevent_t event;
118 uint32_t timeout = 1000; // 1 sec
119
120 /* waiting on invalid handle. */
121 rc = wait(INVALID_IPC_HANDLE, &event, timeout);
122 EXPECT_EQ(ERR_BAD_HANDLE, rc, "wait on invalid handle");
123
124 /*
125 * call wait on an invalid (out of range) handle
126 *
127 * check handling of the following cases:
128 * - handle is on the upper boundary of valid handle range
* - handle is above the upper boundary of the valid handle range
* - handle is below the valid handle range
131 *
132 * in all cases, the expected result is ERR_BAD_HANDLE error.
133 */
134 rc = wait(handle_base + MAX_USER_HANDLES, &event, timeout);
135 EXPECT_EQ(ERR_BAD_HANDLE, rc, "wait on invalid handle");
136
137 rc = wait(handle_base + MAX_USER_HANDLES + 1, &event, timeout);
138 EXPECT_EQ(ERR_BAD_HANDLE, rc, "wait on invalid handle");
139
140 rc = wait(handle_base - 1, &event, timeout);
141 EXPECT_EQ(ERR_BAD_HANDLE, rc, "wait on invalid handle");
142
143 /* waiting on non-existing handle that is in valid range. */
144 for (unsigned int i = first_free_handle_index; i < MAX_USER_HANDLES; i++) {
145 rc = wait(handle_base + i, &event, timeout);
146 EXPECT_EQ(ERR_NOT_FOUND, rc, "wait on invalid handle");
147 }
148 }
149
150 /*
151 * Close handle unittest
152 */
TEST(ipc, DISABLED_WITH_COVERAGE(close_handle_negative)) {
154 int rc;
155
156 /* closing an invalid (negative value) handle. */
157 rc = close(INVALID_IPC_HANDLE);
158 EXPECT_EQ(ERR_BAD_HANDLE, rc, "closing invalid handle");
159
160 /*
161 * call close on an invalid (out of range) handle
162 *
163 * check handling of the following cases:
164 * - handle is on the upper boundary of valid handle range
* - handle is above the upper boundary of the valid handle range
* - handle is below the valid handle range
167 *
168 * in all cases, the expected result is ERR_BAD_HANDLE error.
169 */
170 rc = close(handle_base + MAX_USER_HANDLES);
171 EXPECT_EQ(ERR_BAD_HANDLE, rc, "closing invalid handle");
172
173 rc = close(handle_base + MAX_USER_HANDLES + 1);
174 EXPECT_EQ(ERR_BAD_HANDLE, rc, "closing invalid handle");
175
176 rc = close(handle_base - 1);
177 EXPECT_EQ(ERR_BAD_HANDLE, rc, "closing invalid handle");
178
179 /* closing non-existing handle that is in valid range. */
180 for (unsigned int i = first_free_handle_index; i < MAX_USER_HANDLES; i++) {
181 rc = close(handle_base + i);
182 EXPECT_EQ(ERR_NOT_FOUND, rc, "closing invalid handle");
183 }
184 }
185
186 /*
187 * Set cookie negative unittest
188 */
TEST(ipc, DISABLED_WITH_COVERAGE(set_cookie_negative)) {
190 int rc;
191
192 /* set cookie for invalid (negative value) handle. */
193 rc = set_cookie(INVALID_IPC_HANDLE, (void*)0x1BEEF);
194 EXPECT_EQ(ERR_BAD_HANDLE, rc, "set cookie for invalid handle");
195
196 /*
197 * calling set cookie for an invalid (out of range) handle
198 *
199 * check handling of the following cases:
200 * - handle is on the upper boundary of valid handle range
* - handle is above the upper boundary of the valid handle range
* - handle is below the valid handle range
203 *
204 * in all cases, the expected result is ERR_BAD_HANDLE error.
205 */
206 rc = set_cookie(handle_base + MAX_USER_HANDLES, (void*)0x2BEEF);
207 EXPECT_EQ(ERR_BAD_HANDLE, rc, "set cookie for invalid handle");
208
209 rc = set_cookie(handle_base + MAX_USER_HANDLES + 1, (void*)0x2BEEF);
210 EXPECT_EQ(ERR_BAD_HANDLE, rc, "set cookie for invalid handle");
211
212 rc = set_cookie(handle_base - 1, (void*)0x2BEEF);
213 EXPECT_EQ(ERR_BAD_HANDLE, rc, "set cookie for invalid handle");
214
215 /* set cookie for non-existing handle that is in valid range. */
216 for (unsigned int i = first_free_handle_index; i < MAX_USER_HANDLES; i++) {
217 rc = set_cookie(handle_base + i, (void*)0x3BEEF);
218 EXPECT_EQ(ERR_NOT_FOUND, rc, "set cookie for invalid handle");
219 }
220 }
221
222 /*
223 * Duplicate handle unittest
224 */
TEST(ipc, DISABLED_WITH_COVERAGE(dup_negative)) {
226 int rc;
227
228 /* duplicating an invalid (negative value) handle. */
229 rc = dup(INVALID_IPC_HANDLE);
230 EXPECT_EQ(ERR_BAD_HANDLE, rc, "duplicating invalid handle");
231
232 /*
233 * call dup on an invalid (out of range) handle
234 *
235 * check handling of the following cases:
236 * - handle is on the upper boundary of valid handle range
* - handle is above the upper boundary of the valid handle range
* - handle is below the valid handle range
239 *
240 * in all cases, the expected result is ERR_BAD_HANDLE error.
241 */
242 rc = dup(handle_base + MAX_USER_HANDLES);
243 EXPECT_EQ(ERR_BAD_HANDLE, rc, "duplicating invalid handle");
244
245 rc = dup(handle_base + MAX_USER_HANDLES + 1);
246 EXPECT_EQ(ERR_BAD_HANDLE, rc, "duplicating invalid handle");
247
248 rc = dup(handle_base - 1);
249 EXPECT_EQ(ERR_BAD_HANDLE, rc, "duplicating invalid handle");
250
251 /* duplicating non-existing handle that is in valid range. */
252 for (unsigned int i = first_free_handle_index; i < MAX_USER_HANDLES; i++) {
253 rc = dup(handle_base + i);
254 EXPECT_EQ(ERR_NOT_FOUND, rc, "duplicating invalid handle");
255 }
256 }
257
258 /****************************************************************************/
259
260 /*
261 * Port create unittest
262 */
TEST(ipc, port_create_negative) {
264 int rc;
265 char path[MAX_PORT_PATH_LEN + 16];
266
267 /* create port with empty path */
268 path[0] = '\0';
269 rc = port_create(path, 2, 64, 0);
270 EXPECT_EQ(ERR_INVALID_ARGS, rc, "empty path srv");
271
272 /* create port with zero buffers */
273 sprintf(path, "%s.port", SRV_PATH_BASE);
274 rc = port_create(path, 0, 64, 0);
275 EXPECT_EQ(ERR_INVALID_ARGS, rc, "no buffers");
276
277 /* create port with zero buffer size */
278 sprintf(path, "%s.port", SRV_PATH_BASE);
279 rc = port_create(path, 2, 0, 0);
280 EXPECT_EQ(ERR_INVALID_ARGS, rc, "zero buf size");
281
282 /* create port with large number of buffers */
283 sprintf(path, "%s.port", SRV_PATH_BASE);
284 rc = port_create(path, MAX_PORT_BUF_NUM * 100, 64, 0);
285 EXPECT_EQ(ERR_INVALID_ARGS, rc, "large buf num");
286
287 /* create port with large buffer size */
288 sprintf(path, "%s.port", SRV_PATH_BASE);
289 rc = port_create(path, 2, MAX_PORT_BUF_SIZE * 100, 0);
290 EXPECT_EQ(ERR_INVALID_ARGS, rc, "large buf size");
291
292 /* create port with path oversized name */
293 int len = sprintf(path, "%s.port", SRV_PATH_BASE);
294 for (size_t i = len; i < sizeof(path); i++)
295 path[i] = 'a';
296 path[sizeof(path) - 1] = '\0';
297 rc = port_create(path, 2, MAX_PORT_BUF_SIZE, 0);
298 EXPECT_EQ(ERR_INVALID_ARGS, rc, "path is too long");
299 rc = close((handle_t)rc);
300 EXPECT_EQ(ERR_BAD_HANDLE, rc, "close port");
301 }
302
TEST(ipc, DISABLED_WITH_COVERAGE(port_create)) {
304 int rc;
305 unsigned int i;
306 char path[MAX_PORT_PATH_LEN];
307 handle_t ports[MAX_USER_HANDLES];
308
309 /* create maximum number of ports */
310 for (i = first_free_handle_index; i < MAX_USER_HANDLES - 1; i++) {
311 sprintf(path, "%s.port.%s%d", SRV_PATH_BASE, "test", i);
312 rc = port_create(path, 2, MAX_PORT_BUF_SIZE, 0);
313 EXPECT_GT_ZERO(rc, "create ports");
314 ports[i] = (handle_t)rc;
315
/* create a new port that collides with an existing port */
317 rc = port_create(path, 2, MAX_PORT_BUF_SIZE, 0);
318 EXPECT_EQ(ERR_ALREADY_EXISTS, rc, "create existing port");
319 }
320
321 /* create one more that should succeed */
322 sprintf(path, "%s.port.%s%d", SRV_PATH_BASE, "test", i);
323 rc = port_create(path, 2, MAX_PORT_BUF_SIZE, 0);
324 EXPECT_GT_ZERO(rc, "create ports");
325 ports[i] = (handle_t)rc;
326
/* but creating a colliding port should fail with a different
error code because we actually exceeded the max number of
handles instead of colliding with an existing path */
330 rc = port_create(path, 2, MAX_PORT_BUF_SIZE, 0);
331 EXPECT_EQ(ERR_NO_RESOURCES, rc, "create existing port");
332
333 sprintf(path, "%s.port.%s%d", SRV_PATH_BASE, "test", MAX_USER_HANDLES);
334 rc = port_create(path, 2, MAX_PORT_BUF_SIZE, 0);
335 EXPECT_EQ(ERR_NO_RESOURCES, rc, "max ports");
336
337 /* close them all */
338 for (i = first_free_handle_index; i < MAX_USER_HANDLES; i++) {
339 /* close a valid port */
340 rc = close(ports[i]);
341 EXPECT_EQ(NO_ERROR, rc, "closing port");
342
343 /* close previously closed port. It should fail! */
344 rc = close(ports[i]);
345 EXPECT_EQ(ERR_NOT_FOUND, rc, "closing closed port");
346
347 ports[i] = INVALID_IPC_HANDLE;
348 }
349 }
350
/*
 * Wait on port unittest
 */
TEST(ipc, DISABLED_WITH_COVERAGE(wait_on_port)) {
355 int rc;
356 uevent_t event;
357 char path[MAX_PORT_PATH_LEN];
358 handle_t ports[MAX_USER_HANDLES];
359
360 /* create maximum number of ports */
361 for (unsigned int i = first_free_handle_index; i < MAX_USER_HANDLES; i++) {
362 sprintf(path, "%s.port.%s%d", SRV_PATH_BASE, "test", i);
363 rc = port_create(path, 2, MAX_PORT_BUF_SIZE, 0);
364 EXPECT_GT_ZERO(rc, "max ports");
365 ports[i] = (handle_t)rc;
366
367 rc = set_cookie(ports[i], (void*)(COOKIE_BASE + i));
368 EXPECT_EQ(NO_ERROR, rc, "set cookie on port");
369 }
370
371 /* wait on each individual port */
372 for (unsigned int i = first_free_handle_index; i < MAX_USER_HANDLES; i++) {
373 /* wait with zero timeout */
374 rc = wait(ports[i], &event, 0);
375 EXPECT_EQ(ERR_TIMED_OUT, rc, "zero timeout");
376
377 /* wait with non-zero timeout */
378 rc = wait(ports[i], &event, 100);
379 EXPECT_EQ(ERR_TIMED_OUT, rc, "non-zero timeout");
380 }
381
382 /* wait on all ports with zero timeout */
383 rc = wait_any(&event, 0);
384 EXPECT_EQ(ERR_TIMED_OUT, rc, "zero timeout");
385
386 /* wait on all ports with non-zero timeout*/
387 rc = wait_any(&event, 100);
388 EXPECT_EQ(ERR_TIMED_OUT, rc, "non-zero timeout");
389
390 /* close them all */
391 for (unsigned int i = first_free_handle_index; i < MAX_USER_HANDLES; i++) {
392 /* close a valid port */
393 rc = close(ports[i]);
EXPECT_EQ(NO_ERROR, rc, "closing port");
395 ports[i] = INVALID_IPC_HANDLE;
396 }
397 }
398
399 /****************************************************************************/
400
401 /*
402 * Connect unittests
403 */
TEST(ipc, connect_negative) {
405 int rc;
406 char path[MAX_PORT_PATH_LEN + 16] = "";
407
408 /* try to connect to port with an empty name */
409 rc = connect(path, IPC_CONNECT_WAIT_FOR_PORT);
410 EXPECT_EQ(ERR_INVALID_ARGS, rc, "empty path");
411
412 /* try to connect to non-existing port */
413 sprintf(path, "%s.conn.%s", SRV_PATH_BASE, "blah-blah");
414 rc = connect(path, 0);
415 EXPECT_EQ(ERR_NOT_FOUND, rc, "non-existing path");
416
417 /* try to connect to non-existing port */
418 sprintf(path, "%s.conn.%s", SRV_PATH_BASE, "blah-blah");
419 rc = connect(path, IPC_CONNECT_ASYNC);
420 EXPECT_EQ(ERR_NOT_FOUND, rc, "non-existing path");
421
422 /* try to connect to port with very long name */
423 int len = sprintf(path, "%s.conn.", SRV_PATH_BASE);
424 for (size_t i = len; i < sizeof(path); i++)
425 path[i] = 'a';
426 path[sizeof(path) - 1] = '\0';
427 rc = connect(path, IPC_CONNECT_WAIT_FOR_PORT);
428 EXPECT_EQ(ERR_INVALID_ARGS, rc, "long path");
429
430 rc = close((handle_t)rc);
431 EXPECT_EQ(ERR_BAD_HANDLE, rc, "close channel");
432 }
433
TEST(ipc, DISABLED_WITH_COVERAGE(connect_close)) {
435 int rc;
436 char path[MAX_PORT_PATH_LEN];
437 handle_t chans[16];
438
439 sprintf(path, "%s.srv.%s", SRV_PATH_BASE, "datasink");
440
441 for (unsigned int j = first_free_handle_index; j < MAX_USER_HANDLES; j++) {
/* do several iterations to make sure we are not losing handles */
444 for (unsigned int i = 0; i < countof(chans); i++) {
445 rc = connect(path, IPC_CONNECT_WAIT_FOR_PORT);
446 EXPECT_GT_ZERO(rc, "connect/close");
447 chans[i] = (handle_t)rc;
448 }
449
450 for (unsigned int i = 0; i < countof(chans); i++) {
451 rc = close(chans[i]);
452 EXPECT_EQ(NO_ERROR, rc, "connect/close");
453 }
454 }
455 }
456
static void run_connect_close_by_peer_test(const char* test) {
458 int rc;
459 char path[MAX_PORT_PATH_LEN];
460 uevent_t event;
461 handle_t chans[16];
462 unsigned int chan_cnt = 0;
463
/*
 * open up to 16 connections to the specified test port, which will
 * close them all in a different way:
 */
468 sprintf(path, "%s.srv.%s", SRV_PATH_BASE, test);
469 for (unsigned int i = 0; i < countof(chans); i++) {
470 rc = connect(path, IPC_CONNECT_WAIT_FOR_PORT);
471
/*
 * depending on task scheduling, connect might return a real
 * handle that will be closed later, or it might return an
 * ERR_CHANNEL_CLOSED error if the channel has already been
 * closed. Both cases are correct and must be handled.
 */
478 if (rc >= 0) {
479 /* got real handle */
480 chans[i] = (handle_t)rc;
481
482 /* attach cookie for returned channel */
483 rc = set_cookie((handle_t)rc, (void*)(COOKIE_BASE + i));
484 EXPECT_EQ(NO_ERROR, rc, "%s", test);
485
486 chan_cnt++;
487 } else {
488 /* could be already closed channel */
489 EXPECT_EQ(ERR_CHANNEL_CLOSED, rc, "%s", test);
490 }
491
492 /* check if any channels are closed */
493 while ((rc = wait_any(&event, 0)) == NO_ERROR) {
494 EXPECT_EQ(IPC_HANDLE_POLL_HUP, event.event, "%s", test);
495 uintptr_t idx = (uintptr_t)event.cookie - COOKIE_BASE;
496 EXPECT_EQ(chans[idx], event.handle, "%s", test);
497 EXPECT_GT(countof(chans), idx, "%s", test);
498 if (idx < countof(chans)) {
499 rc = close(chans[idx]);
500 EXPECT_EQ(NO_ERROR, rc, "%s", test);
501 chans[idx] = INVALID_IPC_HANDLE;
502 }
503 chan_cnt--;
504 }
505 }
506
507 /* wait until all channels are closed */
508 while (chan_cnt) {
509 rc = wait_any(&event, 10000);
510 EXPECT_EQ(NO_ERROR, rc, "%s", test);
511 EXPECT_EQ(IPC_HANDLE_POLL_HUP, event.event, "%s", test);
512
513 uintptr_t idx = (uintptr_t)event.cookie - COOKIE_BASE;
514 EXPECT_GT(countof(chans), idx, "%s", test);
515 EXPECT_EQ(chans[idx], event.handle, "%s", test);
516 if (idx < countof(chans)) {
517 rc = close(chans[idx]);
518 EXPECT_EQ(NO_ERROR, rc, "%s", test);
519 chans[idx] = INVALID_IPC_HANDLE;
520 }
521 chan_cnt--;
522 }
523
524 EXPECT_EQ(0, chan_cnt, "%s", test);
525 }
526
TEST(ipc, connect_close_by_peer_1) {
528 run_connect_close_by_peer_test("closer1");
529 }
530
TEST(ipc, connect_close_by_peer_2) {
532 run_connect_close_by_peer_test("closer2");
533 }
534
TEST(ipc, connect_close_by_peer_3) {
536 run_connect_close_by_peer_test("closer3");
537 }
538
TEST(ipc, async_connect) {
540 int rc;
541 handle_t chan;
542 uevent_t event;
543 uuid_t peer_uuid = UUID_INITIAL_VALUE(peer_uuid);
544 char path[MAX_PORT_PATH_LEN];
545
546 sprintf(path, "%s.main.%s", SRV_PATH_BASE, "async");
547
548 /* connect to non existing port synchronously without wait_for_port */
549 rc = connect(path, 0);
550 EXPECT_EQ(ERR_NOT_FOUND, rc, "async");
551 rc = close((handle_t)rc);
552 EXPECT_EQ(ERR_BAD_HANDLE, rc, "async");
553
554 /* connect to non existing port asynchronously without wait_for_port */
555 rc = connect(path, IPC_CONNECT_ASYNC);
556 EXPECT_EQ(ERR_NOT_FOUND, rc, "async");
557 rc = close((handle_t)rc);
558 EXPECT_EQ(ERR_BAD_HANDLE, rc, "async");
559
560 /* connect to non existing port asynchronously with wait_for_port */
561 rc = connect(path, IPC_CONNECT_ASYNC | IPC_CONNECT_WAIT_FOR_PORT);
562 EXPECT_GT_ZERO(rc, "async");
563 if (rc >= 0) {
564 chan = (handle_t)rc;
565
566 /* wait on channel */
567 rc = wait(chan, &event, 1000);
568 EXPECT_EQ(ERR_TIMED_OUT, rc, "async");
569
570 /* and close it */
571 rc = close(chan);
572 EXPECT_EQ(NO_ERROR, rc, "async");
573 }
574
575 /* connect to non-existing port asynchronously with wait_for_port */
576 rc = connect(path, IPC_CONNECT_ASYNC | IPC_CONNECT_WAIT_FOR_PORT);
577 EXPECT_GT_ZERO(rc, "async");
578 chan = (handle_t)rc;
579
580 if (rc >= 0) {
581 handle_t port;
582 uint32_t exp_event;
583
584 /* wait on channel for connect */
585 rc = wait(chan, &event, 100);
586 EXPECT_EQ(ERR_TIMED_OUT, rc, "async");
587
588 /* now create port */
589 rc = port_create(path, 1, 64, IPC_PORT_ALLOW_TA_CONNECT);
590 EXPECT_GT_ZERO(rc, "async");
591 if (rc >= 0) {
592 port = (handle_t)rc;
593
/* and wait for incoming connections */
595 exp_event = IPC_HANDLE_POLL_READY;
596 rc = wait(port, &event, 1000);
597 EXPECT_EQ(NO_ERROR, rc, "async");
598 EXPECT_EQ(exp_event, event.event, "async");
599
600 if (rc == NO_ERROR) {
601 handle_t srv_chan;
602
603 /* got one, accept it */
604 rc = accept(port, &peer_uuid);
605 EXPECT_GT_ZERO(rc, "async");
606 srv_chan = (handle_t)rc;
607
608 /* and close it */
609 close(srv_chan);
610
611 /* now wait on original chan:
612 * there should be READY and HUP events
613 */
614 exp_event = IPC_HANDLE_POLL_READY | IPC_HANDLE_POLL_HUP;
615 rc = wait(chan, &event, 1000);
616 EXPECT_EQ(NO_ERROR, rc, "async");
617 EXPECT_EQ(exp_event, event.event, "async");
618 }
619 close(port);
620 }
621 close(chan);
622 }
623 }
624
TEST(ipc, connect_selfie) {
626 int rc;
627 uuid_t peer_uuid = UUID_INITIAL_VALUE(peer_uuid);
628 uuid_t zero_uuid = UUID_INITIAL_VALUE(zero_uuid);
629 char path[MAX_PORT_PATH_LEN];
630 uint32_t connect_timeout = 1000; // 1 sec
631
/* Try to connect to a port that we registered ourselves.
It is not a very useful scenario, just to make sure that
nothing bad happens */
635 sprintf(path, "%s.main.%s", SRV_PATH_BASE, "selfie");
636 rc = port_create(path, 2, MAX_PORT_BUF_SIZE, IPC_PORT_ALLOW_TA_CONNECT);
637 EXPECT_GT_ZERO(rc, "selfie");
638
639 if (rc >= 0) {
640 handle_t test_port = rc;
641
/* Since we are single-threaded, we will always time out
 * at wait_any for a synchronous connect because we cannot accept.
 */
645
646 /* with non-zero timeout */
647 rc = sync_connect(path, connect_timeout);
648 EXPECT_EQ(ERR_TIMED_OUT, rc, "selfie sync");
649
650 /* with zero timeout */
651 rc = sync_connect(path, 0);
652 EXPECT_EQ(ERR_TIMED_OUT, rc, "selfie sync");
653
/* since we did not call wait on the port yet, we had
 * 2 connection requests pending (attached to the port),
 * torn down by the peer (us) and now removed from the
 * port - there should be no events waiting.
 */
659 uevent_t event;
660 EXPECT_EQ(ERR_TIMED_OUT, wait_any(&event, 0));
661
662 /* retry using a couple of closed async connections */
663 rc = connect(path, IPC_CONNECT_ASYNC);
664 EXPECT_GT(rc, 0, "selfie async");
665
666 rc = close(rc);
667 EXPECT_EQ(0, rc, "selfie async");
668
669 rc = connect(path, IPC_CONNECT_ASYNC);
670 EXPECT_GT(rc, 0, "selfie async");
671
672 rc = close(rc);
673 EXPECT_EQ(0, rc, "selfie async");
674
675 EXPECT_EQ(ERR_TIMED_OUT, wait_any(&event, 0));
676
677 /* retry using accept and sync connection */
678 rc = sync_connect(path, connect_timeout);
679 EXPECT_EQ(ERR_TIMED_OUT, rc, "selfie async");
680
681 rc = sync_connect(path, 0);
682 EXPECT_EQ(ERR_TIMED_OUT, rc, "selfie async");
683
684 /* try accepting a pending connection */
685 rc = accept(test_port, &peer_uuid);
686 EXPECT_EQ(ERR_NO_MSG, rc, "accept");
687
688 rc = memcmp(&peer_uuid, &zero_uuid, sizeof(zero_uuid));
689 EXPECT_EQ(0, rc, "accept");
690
691 /* retry with a pending then closed async connection */
692 rc = connect(path, IPC_CONNECT_ASYNC);
693 EXPECT_GT(rc, 0, "selfie async");
694
695 rc = close(rc);
696 EXPECT_EQ(0, rc, "selfie async");
697
698 /* try accepting a pending connection, that was already closed */
699 rc = accept(test_port, &peer_uuid);
700 EXPECT_EQ(ERR_NO_MSG, rc, "accept async");
701
702 /* retry with a pending async connection */
703 rc = connect(path, IPC_CONNECT_ASYNC);
704 EXPECT_GT(rc, 0, "selfie async");
705
706 handle_t client_port = rc;
707
708 unsigned int exp_event = IPC_HANDLE_POLL_READY;
709
710 int rc = wait_any(&event, INFINITE_TIME);
711 EXPECT_EQ(NO_ERROR, rc, "wait on port");
712 EXPECT_EQ(test_port, event.handle, "wait on port");
713 EXPECT_EQ(exp_event, event.event, "wait on port");
714
715 if (rc == NO_ERROR && (event.event & IPC_HANDLE_POLL_READY)) {
716 /* close the connection */
717 rc = close(client_port);
718 EXPECT_EQ(0, rc, "selfie async");
719
720 /* we had a pending connection, but it is already closed */
721 rc = accept(test_port, &peer_uuid);
722 EXPECT_EQ(ERR_NO_MSG, rc, "accept");
723
724 rc = memcmp(&peer_uuid, &zero_uuid, sizeof(zero_uuid));
725 EXPECT_EQ(0, rc, "accept")
726 }
727
728 /* add couple connections back and destroy them along with port */
729 rc = sync_connect(path, 0);
730 EXPECT_EQ(ERR_TIMED_OUT, rc, "selfie");
731
732 rc = sync_connect(path, 0);
733 EXPECT_EQ(ERR_TIMED_OUT, rc, "selfie");
734
735 /* close selfie port */
736 rc = close(test_port);
737 EXPECT_EQ(NO_ERROR, rc, "close selfie");
738 }
739 }
740
TEST(ipc, connect_access) {
742 int rc;
743 char path[MAX_PORT_PATH_LEN];
744
745 /* open connection to NS only accessible service */
746 sprintf(path, "%s.srv.%s", SRV_PATH_BASE, "ns_only");
747 rc = connect(path, IPC_CONNECT_WAIT_FOR_PORT);
748
749 /* It is expected to fail */
750 EXPECT_EQ(ERR_ACCESS_DENIED, rc, "connect to ns_only");
751
752 if (rc >= 0)
753 close((handle_t)rc);
754
755 /* open connection to TA only accessible service */
756 sprintf(path, "%s.srv.%s", SRV_PATH_BASE, "ta_only");
757 rc = connect(path, IPC_CONNECT_WAIT_FOR_PORT);
758
759 /* it is expected to succeed */
760 EXPECT_GT_ZERO(rc, "connect to ta_only");
761
762 if (rc >= 0)
763 close((handle_t)rc);
764 }
765
766 /****************************************************************************/
767
768 /*
769 * Accept negative test
770 */
TEST(ipc, DISABLED_WITH_COVERAGE(accept_negative)) {
772 int rc, rc1;
773 char path[MAX_PORT_PATH_LEN];
774 handle_t chan;
775 uuid_t peer_uuid = UUID_INITIAL_VALUE(peer_uuid);
776 uuid_t zero_uuid = UUID_INITIAL_VALUE(zero_uuid);
777
778 /* accept on invalid (negative value) handle */
779 rc = accept(INVALID_IPC_HANDLE, &peer_uuid);
780 EXPECT_EQ(ERR_BAD_HANDLE, rc, "accept on invalid handle");
781
782 rc1 = memcmp(&peer_uuid, &zero_uuid, sizeof(zero_uuid));
783 EXPECT_EQ(0, rc1, "accept")
784
785 /*
786 * calling accept on an invalid (out of range) handle
787 *
788 * check handling of the following cases:
789 * - handle is on the upper boundary of valid handle range
* - handle is above the upper boundary of the valid handle range
* - handle is below the valid handle range
792 *
793 * in all cases, the expected result is ERR_BAD_HANDLE error.
794 */
795 rc = accept(handle_base + MAX_USER_HANDLES, &peer_uuid);
796 EXPECT_EQ(ERR_BAD_HANDLE, rc, "accept on invalid handle");
797
798 rc = accept(handle_base + MAX_USER_HANDLES + 1, &peer_uuid);
799 EXPECT_EQ(ERR_BAD_HANDLE, rc, "accept on invalid handle");
800
801 rc = accept(handle_base - 1, &peer_uuid);
802 EXPECT_EQ(ERR_BAD_HANDLE, rc, "accept on invalid handle");
803
804 rc1 = memcmp(&peer_uuid, &zero_uuid, sizeof(zero_uuid));
805 EXPECT_EQ(0, rc1, "accept")
806
807 /* accept on non-existing handle that is in valid range */
808 for (unsigned int i = first_free_handle_index; i < MAX_USER_HANDLES; i++) {
809 rc = accept(handle_base + i, &peer_uuid);
810 EXPECT_EQ(ERR_NOT_FOUND, rc, "accept on invalid handle");
811
812 rc1 = memcmp(&peer_uuid, &zero_uuid, sizeof(zero_uuid));
813 EXPECT_EQ(0, rc1, "accept")
814 }
815
816 /* connect to datasink service */
817 sprintf(path, "%s.srv.%s", SRV_PATH_BASE, "datasink");
818 rc = connect(path, IPC_CONNECT_WAIT_FOR_PORT);
819 EXPECT_GT_ZERO(rc, "connect to datasink");
820 chan = (handle_t)rc;
821
822 /* call accept on channel handle which is an invalid operation */
823 rc = accept(chan, &peer_uuid);
824 EXPECT_EQ(ERR_INVALID_ARGS, rc, "accept on channel");
825
826 rc1 = memcmp(&peer_uuid, &zero_uuid, sizeof(zero_uuid));
827 EXPECT_EQ(0, rc1, "accept")
828
829 rc = close(chan);
EXPECT_EQ(NO_ERROR, rc, "close channel");
831 }
832
833 /*
834 * Disabled per b/140836874 - believed to be a race in the test code, not
835 * in IPC.
836 */
TEST(ipc, DISABLED_accept) {
838 int rc, rc1;
839 uevent_t event;
840 char path[MAX_PORT_PATH_LEN];
841 handle_t ports[MAX_USER_HANDLES];
842 uuid_t peer_uuid = UUID_INITIAL_VALUE(peer_uuid);
843 uuid_t zero_uuid = UUID_INITIAL_VALUE(zero_uuid);
844
845 /* create maximum number of ports */
846 for (unsigned int i = first_free_handle_index; i < MAX_USER_HANDLES; i++) {
847 sprintf(path, "%s.port.accept%d", SRV_PATH_BASE, i);
848 rc = port_create(path, 2, MAX_PORT_BUF_SIZE, IPC_PORT_ALLOW_TA_CONNECT);
849 EXPECT_GT_ZERO(rc, "max ports");
850 ports[i] = (handle_t)rc;
851
852 rc = set_cookie(ports[i], (void*)(COOKIE_BASE + ports[i]));
853 EXPECT_EQ(NO_ERROR, rc, "set cookie on port");
854 }
855
856 /* poke connect service to initiate connections to us */
857 sprintf(path, "%s.srv.%s", SRV_PATH_BASE, "connect");
858 rc = connect(path, IPC_CONNECT_WAIT_FOR_PORT);
859 if (rc >= 0)
860 close((handle_t)rc);
861
862 /* handle incoming connections */
863 for (unsigned int i = first_free_handle_index; i < MAX_USER_HANDLES; i++) {
864 rc = wait_any(&event, 1000);
865 EXPECT_EQ(NO_ERROR, rc, "accept test");
866 EXPECT_EQ(IPC_HANDLE_POLL_READY, event.event, "accept test");
867
868 /* check port cookie */
869 void* exp_cookie = (void*)(COOKIE_BASE + event.handle);
870 EXPECT_EQ(exp_cookie, event.cookie, "accept test");
871
872 /* accept connection - should fail because we do not
873 have any room for handles */
874 rc = accept(event.handle, &peer_uuid);
875 EXPECT_EQ(ERR_NO_RESOURCES, rc, "accept test");
876
877 /* check peer uuid */
878 rc1 = memcmp(&peer_uuid, &zero_uuid, sizeof(zero_uuid));
879 EXPECT_EQ(0, rc1, "accept test")
880 }
881
882 /* free 1 handle so we have room and repeat test */
883 rc = close(ports[first_free_handle_index]);
EXPECT_EQ(NO_ERROR, rc, "close accept test");
ports[first_free_handle_index] = INVALID_IPC_HANDLE;
886
887 /* poke connect service to initiate connections to us */
888 sprintf(path, "%s.srv.%s", SRV_PATH_BASE, "connect");
889 rc = connect(path, IPC_CONNECT_WAIT_FOR_PORT);
890 if (rc >= 0)
891 close((handle_t)rc);
892
893 /* handle incoming connections */
894 for (unsigned int i = first_free_handle_index; i < MAX_USER_HANDLES - 1;
895 i++) {
896 rc = wait_any(&event, 3000);
897 EXPECT_EQ(NO_ERROR, rc, "accept test");
898 EXPECT_EQ(IPC_HANDLE_POLL_READY, event.event, "accept test");
899
900 /* check port cookie */
901 void* exp_cookie = (void*)(COOKIE_BASE + event.handle);
902 EXPECT_EQ(exp_cookie, event.cookie, "accept test");
903
904 rc = accept(event.handle, &peer_uuid);
905 EXPECT_EQ(handle_base + first_free_handle_index, rc, "accept test");
906
907 /* check peer uuid */
908 rc1 = memcmp(&peer_uuid, &srv_app_uuid, sizeof(srv_app_uuid));
909 EXPECT_EQ(0, rc1, "accept test")
910
911 rc = close((handle_t)rc);
912 EXPECT_EQ(NO_ERROR, rc, "accept test");
913 }
914
915 /* close them all */
916 for (unsigned int i = first_free_handle_index + 1; i < MAX_USER_HANDLES;
917 i++) {
918 /* close a valid port */
919 rc = close(ports[i]);
920 EXPECT_EQ(NO_ERROR, rc, "close port");
921 ports[i] = INVALID_IPC_HANDLE;
922 }
923 }
924
925 /****************************************************************************/
926
TEST(ipc, DISABLED_WITH_COVERAGE(get_msg_negative)) {
928 int rc;
929 ipc_msg_info_t inf;
930 handle_t port;
931 handle_t chan;
932 char path[MAX_PORT_PATH_LEN];
933
934 /* get_msg on invalid (negative value) handle. */
935 rc = get_msg(INVALID_IPC_HANDLE, &inf);
936 EXPECT_EQ(ERR_BAD_HANDLE, rc, "get_msg on invalid handle");
937
938 /*
939 * calling get_msg on an invalid (out of range) handle
940 *
941 * check handling of the following cases:
942 * - handle is on the upper boundary of valid handle range
* - handle is above the upper boundary of the valid handle range
* - handle is below the valid handle range
945 *
946 * in all cases, the expected result is ERR_BAD_HANDLE error.
947 */
948 rc = get_msg(handle_base + MAX_USER_HANDLES, &inf);
949 EXPECT_EQ(ERR_BAD_HANDLE, rc, "get_msg on invalid handle");
950
951 rc = get_msg(handle_base + MAX_USER_HANDLES + 1, &inf);
952 EXPECT_EQ(ERR_BAD_HANDLE, rc, "get_msg on invalid handle");
953
954 rc = get_msg(handle_base - 1, &inf);
955 EXPECT_EQ(ERR_BAD_HANDLE, rc, "get_msg on invalid handle");
956
957 /* get_msg on non-existing handle that is in valid range. */
958 for (unsigned int i = first_free_handle_index; i < MAX_USER_HANDLES; i++) {
959 rc = get_msg(handle_base + i, &inf);
960 EXPECT_EQ(ERR_NOT_FOUND, rc, "get_msg on invalid handle");
961 }
962
963 /* calling get_msg on port handle should fail
964 because get_msg is only applicable to channels */
965 sprintf(path, "%s.main.%s", SRV_PATH_BASE, "datasink");
966 rc = port_create(path, 2, MAX_PORT_BUF_SIZE, IPC_PORT_ALLOW_TA_CONNECT);
967 EXPECT_GT_ZERO(rc, "create datasink port");
968 port = (handle_t)rc;
969
970 rc = get_msg(port, &inf);
971 EXPECT_EQ(ERR_INVALID_ARGS, rc, "get_msg on port");
972 close(port);
973
/* call get_msg on a channel that does not have any pending messages */
975 sprintf(path, "%s.srv.%s", SRV_PATH_BASE, "datasink");
976 rc = connect(path, IPC_CONNECT_WAIT_FOR_PORT);
977 EXPECT_GT_ZERO(rc, "connect to datasink");
978 chan = (handle_t)rc;
979
980 rc = get_msg(chan, &inf);
981 EXPECT_EQ(ERR_NO_MSG, rc, "get_msg on empty channel");
982
983 rc = close(chan);
EXPECT_EQ(NO_ERROR, rc, "close channel");
985 }
986
TEST(ipc, DISABLED_WITH_COVERAGE(put_msg_negative)) {
988 int rc;
989 handle_t port;
990 handle_t chan;
991 char path[MAX_PORT_PATH_LEN];
992
993 /* put_msg on invalid (negative value) handle */
994 rc = put_msg(INVALID_IPC_HANDLE, 0);
995 EXPECT_EQ(ERR_BAD_HANDLE, rc, "put_msg on invalid handle");
996
997 /*
998 * calling put_msg on an invalid (out of range) handle
999 *
1000 * check handling of the following cases:
1001 * - handle is on the upper boundary of valid handle range
* - handle is above the upper boundary of the valid handle range
* - handle is below the valid handle range
1004 *
1005 * in all cases, the expected result is ERR_BAD_HANDLE error.
1006 */
1007 rc = put_msg(handle_base + MAX_USER_HANDLES, 0);
1008 EXPECT_EQ(ERR_BAD_HANDLE, rc, "put_msg on invalid handle");
1009
1010 rc = put_msg(handle_base + MAX_USER_HANDLES + 1, 0);
1011 EXPECT_EQ(ERR_BAD_HANDLE, rc, "put_msg on invalid handle");
1012
1013 rc = put_msg(handle_base - 1, 0);
1014 EXPECT_EQ(ERR_BAD_HANDLE, rc, "put_msg on invalid handle");
1015
1016 /* put_msg on non-existing handle that is in valid range */
1017 for (unsigned int i = first_free_handle_index; i < MAX_USER_HANDLES; i++) {
1018 rc = put_msg(handle_base + i, 0);
1019 EXPECT_EQ(ERR_NOT_FOUND, rc, "put_msg on invalid handle");
1020 }
1021
1022 /* calling put_msg on port handle should fail
1023 because put_msg is only applicable to channels */
1024 sprintf(path, "%s.main.%s", SRV_PATH_BASE, "datasink");
1025 rc = port_create(path, 2, MAX_PORT_BUF_SIZE, IPC_PORT_ALLOW_TA_CONNECT);
1026 EXPECT_GT_ZERO(rc, "create datasink port");
1027 port = (handle_t)rc;
1028
1029 rc = put_msg(port, 0);
1030 EXPECT_EQ(ERR_INVALID_ARGS, rc, "put_msg on port");
1031 rc = close(port);
1032 EXPECT_EQ(NO_ERROR, rc, "close port");
1033
/* call put_msg on a channel that does not have any pending messages */
1035 sprintf(path, "%s.srv.%s", SRV_PATH_BASE, "datasink");
1036 rc = connect(path, IPC_CONNECT_WAIT_FOR_PORT);
1037 EXPECT_GT_ZERO(rc, "connect to datasink");
1038 chan = (handle_t)rc;
1039
1040 rc = put_msg(chan, 0);
1041 EXPECT_EQ(ERR_INVALID_ARGS, rc, "put_msg on empty channel");
1042 rc = close(chan);
1043 EXPECT_EQ(NO_ERROR, rc, "close channel");
1044 }
1045
1046 /*
1047 * Send 10000 messages to datasink service
1048 */
TEST(ipc, send_msg) {
1050 int rc;
1051 handle_t chan;
1052 char path[MAX_PORT_PATH_LEN];
1053 uint8_t buf0[64];
1054 uint8_t buf1[64];
1055 struct iovec iov[2];
1056 ipc_msg_t msg;
1057
1058 /* prepare test buffer */
1059 fill_test_buf(buf0, sizeof(buf0), 0x55);
1060 fill_test_buf(buf1, sizeof(buf1), 0x44);
1061
1062 iov[0].iov_base = buf0;
1063 iov[0].iov_len = sizeof(buf0);
1064 iov[1].iov_base = buf1;
1065 iov[1].iov_len = sizeof(buf1);
1066 msg.num_handles = 0;
1067 msg.handles = NULL;
1068 msg.num_iov = 2;
1069 msg.iov = iov;
1070
1071 /* open connection to datasink service */
1072 sprintf(path, "%s.srv.%s", SRV_PATH_BASE, "datasink");
1073 rc = connect(path, IPC_CONNECT_WAIT_FOR_PORT);
1074 EXPECT_GT_ZERO(rc, "connect to datasink");
1075
1076 if (rc >= 0) {
1077 chan = (handle_t)rc;
1078 for (unsigned int i = 0; i < 10000; i++) {
1079 rc = send_msg(chan, &msg);
1080 if (rc == ERR_NOT_ENOUGH_BUFFER) { /* wait for room */
1081 uevent_t uevt;
1082 unsigned int exp_event = IPC_HANDLE_POLL_SEND_UNBLOCKED;
1083 rc = wait(chan, &uevt, 1000);
1084 EXPECT_EQ(NO_ERROR, rc, "waiting for space");
1085 EXPECT_EQ(chan, uevt.handle, "waiting for space");
1086 EXPECT_EQ(exp_event, uevt.event, "waiting for space");
1087 } else {
1088 EXPECT_EQ(64, rc, "send_msg bulk")
1089 }
1090 if (HasFailure()) {
1091 TLOGI("%s: abort (rc = %d) test\n", __func__, rc);
1092 break;
1093 }
1094 }
1095 rc = close(chan);
1096 EXPECT_EQ(NO_ERROR, rc, "close channel");
1097 }
1098 }
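
/*
 * The send/wait/retry pattern exercised above is what a blocking sender
 * generally looks like. A minimal sketch of such a helper (purely
 * illustrative; the helper name and its use of INFINITE_TIME are
 * assumptions, not part of this test app):
 *
 *   static int send_buf_blocking(handle_t chan, const void* buf, size_t len) {
 *       struct iovec iov = { .iov_base = (void*)buf, .iov_len = len };
 *       ipc_msg_t msg = { .iov = &iov, .num_iov = 1,
 *                         .handles = NULL, .num_handles = 0 };
 *       for (;;) {
 *           int rc = send_msg(chan, &msg);
 *           if (rc != ERR_NOT_ENOUGH_BUFFER)
 *               return rc;   // sent (rc == bytes written) or a hard error
 *           uevent_t evt;
 *           rc = wait(chan, &evt, INFINITE_TIME);   // wait for SEND_UNBLOCKED
 *           if (rc != NO_ERROR)
 *               return rc;
 *       }
 *   }
 */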
1099
TEST(ipc, DISABLED_WITH_COVERAGE(send_msg_negative)) {
1101 int rc;
1102 handle_t port;
1103 handle_t chan;
1104 char path[MAX_PORT_PATH_LEN];
1105 uint8_t buf[64];
1106 struct iovec iov[2];
1107 ipc_msg_t msg;
1108
1109 /* init msg to empty message */
1110 memset(&msg, 0, sizeof(msg));
1111
1112 /* send_msg on invalid (negative value) handle */
1113 rc = send_msg(INVALID_IPC_HANDLE, &msg);
1114 EXPECT_EQ(ERR_BAD_HANDLE, rc, "send_msg on invalid handle");
1115
1116 /* calling send_msg with NULL msg should fail for any handle */
1117 rc = send_msg(INVALID_IPC_HANDLE, NULL);
1118 EXPECT_EQ(ERR_FAULT, rc, "send_msg on NULL msg");
1119
1120 /*
1121 * calling send_msg on an invalid (out of range) handle
1122 *
1123 * check handling of the following cases:
1124 * - handle is on the upper boundary of valid handle range
* - handle is above the upper boundary of the valid handle range
* - handle is below the valid handle range
1127 *
1128 * in all cases, the expected result is ERR_BAD_HANDLE error.
1129 */
1130 rc = send_msg(handle_base + MAX_USER_HANDLES, &msg);
1131 EXPECT_EQ(ERR_BAD_HANDLE, rc, "send_msg on invalid handle");
1132
1133 rc = send_msg(handle_base + MAX_USER_HANDLES + 1, &msg);
1134 EXPECT_EQ(ERR_BAD_HANDLE, rc, "send_msg on invalid handle");
1135
1136 rc = send_msg(handle_base - 1, &msg);
1137 EXPECT_EQ(ERR_BAD_HANDLE, rc, "send_msg on invalid handle");
1138
1139 /* calling send_msg with NULL msg should fail for any handle */
1140 rc = send_msg(MAX_USER_HANDLES, NULL);
1141 EXPECT_EQ(ERR_FAULT, rc, "send_msg on NULL msg");
1142
1143 /* send_msg on non-existing handle that is in valid range */
1144 for (unsigned int i = first_free_handle_index; i < MAX_USER_HANDLES; i++) {
1145 rc = send_msg(handle_base + i, &msg);
1146 EXPECT_EQ(ERR_NOT_FOUND, rc, "send on invalid handle");
1147
1148 /* calling send_msg with NULL msg should fail for any handle */
1149 rc = send_msg(handle_base + i, NULL);
1150 EXPECT_EQ(ERR_FAULT, rc, "send_msg on NULL msg");
1151 }
1152
1153 /* calling send_msg on port handle should fail
1154 because send_msg is only applicable to channels */
1155 sprintf(path, "%s.main.%s", SRV_PATH_BASE, "datasink");
1156 rc = port_create(path, 2, MAX_PORT_BUF_SIZE, IPC_PORT_ALLOW_TA_CONNECT);
1157 EXPECT_GT_ZERO(rc, "create datasink port");
1158 port = (handle_t)rc;
1159
1160 rc = send_msg(port, &msg);
1161 EXPECT_EQ(ERR_INVALID_ARGS, rc, "send_msg on port");
1162 close(port);
1163
1164 /* open connection to datasink service */
1165 sprintf(path, "%s.srv.%s", SRV_PATH_BASE, "datasink");
1166 rc = connect(path, IPC_CONNECT_WAIT_FOR_PORT);
1167 EXPECT_GT_ZERO(rc, "connect to datasink");
1168 chan = (handle_t)rc;
1169
1170 /* send message with handles pointing to NULL */
1171 msg.num_handles = 1;
1172 msg.handles = NULL;
1173 rc = send_msg(chan, &msg);
1174 EXPECT_EQ(ERR_FAULT, rc, "sending handles");
1175
1176 /* reset handles */
1177 msg.num_handles = 0;
1178 msg.handles = NULL;
1179
1180 /* set num_iov to non zero but keep iov ptr NULL */
1181 msg.num_iov = 1;
1182 msg.iov = NULL;
1183 rc = send_msg(chan, &msg);
1184 EXPECT_EQ(ERR_FAULT, rc, "sending bad iovec array");
1185
1186 /* send msg with iovec with bad base ptr */
1187 iov[0].iov_len = sizeof(buf) / 2;
1188 iov[0].iov_base = NULL;
1189 iov[1].iov_len = sizeof(buf) / 2;
1190 iov[1].iov_base = NULL;
1191 msg.num_iov = 2;
1192 msg.iov = iov;
1193 rc = send_msg(chan, &msg);
1194 EXPECT_EQ(ERR_FAULT, rc, "sending bad iovec");
1195
1196 /* send msg with iovec with bad base ptr */
1197 iov[0].iov_len = sizeof(buf) / 2;
1198 iov[0].iov_base = buf;
1199 iov[1].iov_len = sizeof(buf) / 2;
1200 iov[1].iov_base = NULL;
1201 msg.num_iov = 2;
1202 msg.iov = iov;
1203 rc = send_msg(chan, &msg);
1204 EXPECT_EQ(ERR_FAULT, rc, "sending bad iovec");
1205
1206 rc = close(chan);
1207 EXPECT_EQ(NO_ERROR, rc, "close channel");
1208 }
1209
TEST(ipc, DISABLED_WITH_COVERAGE(read_msg_negative)) {
1211 int rc;
1212 handle_t port;
1213 handle_t chan;
1214 uevent_t uevt;
1215 char path[MAX_PORT_PATH_LEN];
1216 uint8_t tx_buf[64];
1217 uint8_t rx_buf[64];
1218 ipc_msg_info_t inf;
1219 ipc_msg_t tx_msg;
1220 struct iovec tx_iov;
1221 ipc_msg_t rx_msg;
1222 struct iovec rx_iov[2];
1223
1224 /* init msg to empty message */
1225 memset(&rx_msg, 0, sizeof(rx_msg));
1226 memset(&tx_msg, 0, sizeof(tx_msg));
1227
1228 /* read_msg on invalid (negative value) handle */
1229 rc = read_msg(INVALID_IPC_HANDLE, 0, 0, &rx_msg);
1230 EXPECT_EQ(ERR_BAD_HANDLE, rc, "read_msg on invalid handle");
1231
1232 rc = read_msg(INVALID_IPC_HANDLE, 0, 0, NULL);
1233 EXPECT_EQ(ERR_FAULT, rc, "read_msg on invalid handle");
1234
1235 /*
1236 * calling read_msg on an invalid (out of range) handle
1237 *
1238 * check handling of the following cases:
1239 * - handle is on the upper boundary of valid handle range
* - handle is above the upper boundary of the valid handle range
* - handle is below the valid handle range
1242 *
1243 * in all cases, the expected result is ERR_BAD_HANDLE error.
1244 */
1245 rc = read_msg(handle_base + MAX_USER_HANDLES, 0, 0, &rx_msg);
1246 EXPECT_EQ(ERR_BAD_HANDLE, rc, "read_msg on bad handle");
1247
1248 rc = read_msg(handle_base + MAX_USER_HANDLES + 1, 0, 0, &rx_msg);
1249 EXPECT_EQ(ERR_BAD_HANDLE, rc, "read_msg on bad handle");
1250
1251 rc = read_msg(handle_base - 1, 0, 0, &rx_msg);
1252 EXPECT_EQ(ERR_BAD_HANDLE, rc, "read_msg on bad handle");
1253
1254 /* calling read_msg with NULL msg should fail for any handle */
1255 rc = read_msg(handle_base + MAX_USER_HANDLES, 0, 0, NULL);
1256 EXPECT_EQ(ERR_FAULT, rc, "read_msg on NULL msg");
1257
/* read_msg on non-existing handle that is in valid range */
1259 for (unsigned int i = first_free_handle_index; i < MAX_USER_HANDLES; i++) {
1260 rc = read_msg(handle_base + i, 0, 0, &rx_msg);
1261 EXPECT_EQ(ERR_NOT_FOUND, rc, "read_msg on non existing handle");
1262
/* calling read_msg with NULL msg should fail for any handle */
1264 rc = read_msg(handle_base + i, 0, 0, NULL);
1265 EXPECT_EQ(ERR_FAULT, rc, "read_msg on NULL msg");
1266 }
1267
1268 /* calling read_msg on port handle should fail
1269 because read_msg is only applicable to channels */
1270 sprintf(path, "%s.main.%s", SRV_PATH_BASE, "datasink");
1271 rc = port_create(path, 2, MAX_PORT_BUF_SIZE, IPC_PORT_ALLOW_TA_CONNECT);
1272 EXPECT_GT_ZERO(rc, "create datasink port");
1273 port = (handle_t)rc;
1274
1275 rc = read_msg(port, 0, 0, &rx_msg);
1276 EXPECT_EQ(ERR_INVALID_ARGS, rc, "read_msg on port");
1277 close(port);
1278
1279 /* open connection to echo service */
1280 sprintf(path, "%s.srv.%s", SRV_PATH_BASE, "echo");
1281 rc = connect(path, IPC_CONNECT_WAIT_FOR_PORT);
EXPECT_GT_ZERO(rc, "connect to echo");
1283 chan = (handle_t)rc;
1284
1285 /* NULL msg on valid channel */
1286 rc = read_msg(chan, 0, 0, NULL);
1287 EXPECT_EQ(ERR_FAULT, rc, "read_msg on NULL msg");
1288
1289 /* read_msg on invalid msg id */
1290 rc = read_msg(chan, 0, 0, &rx_msg);
1291 EXPECT_EQ(ERR_INVALID_ARGS, rc, "read_msg on invalid msg id");
1292
1293 rc = read_msg(chan, 1000, 0, &rx_msg);
1294 EXPECT_EQ(ERR_INVALID_ARGS, rc, "read_msg on invalid msg id");
1295
1296 /* send a message to echo service */
1297 memset(tx_buf, 0x55, sizeof(tx_buf));
1298 tx_iov.iov_base = tx_buf;
1299 tx_iov.iov_len = sizeof(tx_buf);
1300 tx_msg.num_iov = 1;
1301 tx_msg.iov = &tx_iov;
1302 tx_msg.num_handles = 0;
1303 tx_msg.handles = NULL;
1304
1305 rc = send_msg(chan, &tx_msg);
1306 EXPECT_EQ(64, rc, "sending msg to echo");
1307
1308 /* and wait for response */
1309 rc = wait(chan, &uevt, 1000);
1310 EXPECT_EQ(NO_ERROR, rc, "waiting on echo response");
1311 EXPECT_EQ(chan, uevt.handle, "wait on channel");
1312
1313 rc = get_msg(chan, &inf);
1314 EXPECT_EQ(NO_ERROR, rc, "getting echo msg");
1315 EXPECT_EQ(sizeof(tx_buf), inf.len, "echo message reply length");
1316
1317 /* now we have valid message with valid id */
1318
1319 rx_iov[0].iov_len = sizeof(rx_buf) / 2;
1320 rx_iov[1].iov_len = sizeof(rx_buf) / 2;
1321
1322 /* read message with invalid iovec array */
1323 rx_msg.iov = NULL;
1324 rx_msg.num_iov = 2;
1325 rc = read_msg(chan, inf.id, 0, &rx_msg);
1326 EXPECT_EQ(ERR_FAULT, rc, "read with invalid iovec array");
1327
1328 /* read with invalid iovec entry */
1329 rx_iov[0].iov_base = NULL;
1330 rx_iov[1].iov_base = NULL;
1331 rx_msg.iov = rx_iov;
1332 rc = read_msg(chan, inf.id, 0, &rx_msg);
1333 EXPECT_EQ(ERR_FAULT, rc, "read with invalid iovec");
1334
1335 rx_iov[0].iov_base = rx_buf;
1336 rx_iov[1].iov_base = NULL;
1337 rc = read_msg(chan, inf.id, 0, &rx_msg);
1338 EXPECT_EQ(ERR_FAULT, rc, "read with invalid iovec");
1339
1340 rx_iov[0].iov_base = rx_buf;
1341 rx_iov[1].iov_base = rx_buf + sizeof(rx_buf) / 2;
1342
1343 /* read with invalid offset with valid iovec array */
1344 rc = read_msg(chan, inf.id, inf.len + 1, &rx_msg);
1345 EXPECT_EQ(ERR_INVALID_ARGS, rc, "read with invalid offset");
1346
1347 /* cleanup */
1348 rc = put_msg(chan, inf.id);
1349 EXPECT_EQ(NO_ERROR, rc, "putting echo msg");
1350
1351 rc = close(chan);
1352 EXPECT_EQ(NO_ERROR, rc, "close channel");
1353 }
1354
TEST(ipc, end_to_end_msg) {
1356 int rc;
1357 handle_t chan;
1358 uevent_t uevt;
1359 char path[MAX_PORT_PATH_LEN];
1360 uint8_t tx_buf[64];
1361 uint8_t rx_buf[64];
1362 ipc_msg_info_t inf;
1363 ipc_msg_t tx_msg;
1364 struct iovec tx_iov;
1365 ipc_msg_t rx_msg;
1366 struct iovec rx_iov;
1367
1368 tx_iov.iov_base = tx_buf;
1369 tx_iov.iov_len = sizeof(tx_buf);
1370 tx_msg.num_iov = 1;
1371 tx_msg.iov = &tx_iov;
1372 tx_msg.num_handles = 0;
1373 tx_msg.handles = NULL;
1374
1375 rx_iov.iov_base = rx_buf;
1376 rx_iov.iov_len = sizeof(rx_buf);
1377 rx_msg.num_iov = 1;
1378 rx_msg.iov = &rx_iov;
1379 rx_msg.num_handles = 0;
1380 rx_msg.handles = NULL;
1381
1382 memset(tx_buf, 0x55, sizeof(tx_buf));
1383 memset(rx_buf, 0xaa, sizeof(rx_buf));
1384
1385 sprintf(path, "%s.srv.%s", SRV_PATH_BASE, "echo");
1386 rc = connect(path, IPC_CONNECT_WAIT_FOR_PORT);
1387 EXPECT_GT_ZERO(rc, "connect to echo");
1388
1389 if (rc >= 0) {
1390 unsigned int tx_cnt = 0;
1391 unsigned int rx_cnt = 0;
1392
1393 chan = (handle_t)rc;
1394
1395 /* send 10000 messages synchronously, waiting for reply
1396 for each one
1397 */
1398 tx_cnt = 10000;
1399 while (tx_cnt) {
1400 /* send a message */
1401 rc = send_msg(chan, &tx_msg);
1402 EXPECT_EQ(64, rc, "sending msg to echo");
1403
1404 /* wait for response */
1405 rc = wait(chan, &uevt, 1000);
1406 EXPECT_EQ(NO_ERROR, rc, "waiting on echo response");
1407 EXPECT_EQ(chan, uevt.handle, "wait on channel");
1408
1409 /* get a reply */
1410 rc = get_msg(chan, &inf);
1411 EXPECT_EQ(NO_ERROR, rc, "getting echo msg");
1412
1413 /* read reply data */
1414 rc = read_msg(chan, inf.id, 0, &rx_msg);
1415 EXPECT_EQ(64, rc, "reading echo msg");
1416
1417 /* discard reply */
1418 rc = put_msg(chan, inf.id);
1419 EXPECT_EQ(NO_ERROR, rc, "putting echo msg");
1420
1421 tx_cnt--;
1422 }
1423
1424 /* send/receive 10000 messages asynchronously. */
1425 rx_cnt = tx_cnt = 10000;
1426 while (tx_cnt || rx_cnt) {
1427 /* send messages until all buffers are full */
1428 while (tx_cnt) {
1429 rc = send_msg(chan, &tx_msg);
1430 if (rc == ERR_NOT_ENOUGH_BUFFER)
1431 break; /* no more space */
1432 EXPECT_EQ(64, rc, "sending msg to echo");
1433 if (rc != 64)
1434 goto abort_test;
1435 tx_cnt--;
1436 }
1437
1438 /* wait for reply msg or room */
1439 rc = wait(chan, &uevt, 1000);
1440 EXPECT_EQ(NO_ERROR, rc, "waiting for reply");
1441 EXPECT_EQ(chan, uevt.handle, "wait on channel");
1442
1443 /* drain all messages */
1444 while (rx_cnt) {
1445 /* get a reply */
1446 rc = get_msg(chan, &inf);
1447 if (rc == ERR_NO_MSG)
1448 break; /* no more messages */
1449
1450 EXPECT_EQ(NO_ERROR, rc, "getting echo msg");
1451
1452 /* read reply data */
1453 rc = read_msg(chan, inf.id, 0, &rx_msg);
1454 EXPECT_EQ(64, rc, "reading echo msg");
1455
1456 /* discard reply */
1457 rc = put_msg(chan, inf.id);
1458 EXPECT_EQ(NO_ERROR, rc, "putting echo msg");
1459
1460 rx_cnt--;
1461 }
1462
1463 if (HasFailure())
1464 break;
1465 }
1466
1467 abort_test:
1468 EXPECT_EQ(0, tx_cnt, "tx_cnt");
1469 EXPECT_EQ(0, rx_cnt, "rx_cnt");
1470
1471 rc = close(chan);
1472 EXPECT_EQ(NO_ERROR, rc, "close channel");
1473 }
1474 }
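
/*
 * The synchronous half of the test above reduces to a fixed
 * send/wait/get/read/put sequence. A condensed sketch of one round trip
 * (helper name and timeout are illustrative, not part of this file):
 *
 *   static int echo_once(handle_t chan, ipc_msg_t* tx, ipc_msg_t* rx) {
 *       uevent_t evt;
 *       ipc_msg_info_t inf;
 *       int rc = send_msg(chan, tx);          // queue the request
 *       if (rc < 0)
 *           return rc;
 *       rc = wait(chan, &evt, 1000);          // wait for the reply event
 *       if (rc != NO_ERROR)
 *           return rc;
 *       rc = get_msg(chan, &inf);             // pick up the reply descriptor
 *       if (rc != NO_ERROR)
 *           return rc;
 *       rc = read_msg(chan, inf.id, 0, rx);   // copy out the payload
 *       put_msg(chan, inf.id);                // always retire the message
 *       return rc;
 *   }
 */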
1475
1476 /****************************************************************************/
1477
TEST(ipc, hset_create) {
1479 handle_t hset1;
1480 handle_t hset2;
1481
1482 hset1 = handle_set_create();
1483 EXPECT_GE_ZERO((int)hset1, "create handle set1");
1484
1485 hset2 = handle_set_create();
1486 EXPECT_GE_ZERO((int)hset2, "create handle set2");
1487
1488 close(hset1);
1489 close(hset2);
1490 }
1491
TEST(ipc, hset_add_mod_del) {
1493 int rc;
1494 handle_t hset1;
1495 handle_t hset2;
1496
1497 hset1 = handle_set_create();
1498 EXPECT_GE_ZERO((int)hset1, "create handle set1");
1499
1500 hset2 = handle_set_create();
1501 EXPECT_GE_ZERO((int)hset2, "create handle set2");
1502
1503 ABORT_IF_NOT_OK(abort_test);
1504
1505 uevent_t evt = {
1506 .handle = hset2,
1507 .event = ~0U,
1508 .cookie = NULL,
1509 };
1510
1511 /* add handle to handle set */
1512 rc = handle_set_ctrl(hset1, HSET_ADD, &evt);
1513 EXPECT_EQ(0, rc, "hset add");
1514
1515 /* modify handle attributes in handle set */
1516 rc = handle_set_ctrl(hset1, HSET_MOD, &evt);
1517 EXPECT_EQ(0, rc, "hset mod");
1518
1519 /* remove handle from handle set */
1520 rc = handle_set_ctrl(hset1, HSET_DEL, &evt);
1521 EXPECT_EQ(0, rc, "hset del");
1522
1523 abort_test:
1524 close(hset1);
1525 close(hset2);
1526 }
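
/*
 * Outside of these unit tests, HSET_ADD/HSET_MOD/HSET_DEL are normally used
 * to build a single wait loop over many channels. A minimal sketch of that
 * pattern (purely illustrative; chan, my_state and handle_event() are
 * hypothetical and not part of this app):
 *
 *   handle_t hset = handle_set_create();
 *   uevent_t evt = {
 *       .handle = chan,        // some connected channel
 *       .event = ~0U,          // subscribe to all events, as the tests do
 *       .cookie = my_state,    // per-channel context
 *   };
 *   handle_set_ctrl(hset, HSET_ADD, &evt);
 *
 *   while (wait(hset, &evt, INFINITE_TIME) == NO_ERROR) {
 *       handle_event(evt.cookie, evt.handle, evt.event);
 *   }
 *   close(hset);
 */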
1527
TEST(ipc, hset_add_self) {
1529 int rc;
1530 handle_t hset1;
1531 handle_t hset1_dup;
1532
1533 hset1 = handle_set_create();
1534 EXPECT_GE_ZERO((int)hset1, "create handle set1");
1535
1536 hset1_dup = dup(hset1);
1537 EXPECT_GE_ZERO((int)hset1_dup, "duplicate handle set1");
1538
1539 ABORT_IF_NOT_OK(abort_test);
1540
1541 uevent_t evt = {
1542 .handle = hset1,
1543 .event = ~0U,
1544 .cookie = NULL,
1545 };
1546
1547 /* add handle to handle set */
1548 rc = handle_set_ctrl(hset1, HSET_ADD, &evt);
1549 EXPECT_EQ(ERR_INVALID_ARGS, rc, "hset add self");
1550
1551 /* add handle to handle set */
1552 evt.handle = hset1_dup;
1553 rc = handle_set_ctrl(hset1, HSET_ADD, &evt);
1554 EXPECT_EQ(ERR_INVALID_ARGS, rc, "hset add duplicate of self");
1555
1556 abort_test:
1557 close(hset1);
1558 close(hset1_dup);
1559 }
1560
TEST(ipc, hset_add_loop) {
1562 int rc;
1563 handle_t hset1;
1564 handle_t hset2;
1565 handle_t hset3;
1566
1567 hset1 = handle_set_create();
1568 EXPECT_GE_ZERO((int)hset1, "create handle set1");
1569
1570 hset2 = handle_set_create();
1571 EXPECT_GE_ZERO((int)hset2, "create handle set2");
1572
1573 hset3 = handle_set_create();
1574 EXPECT_GE_ZERO((int)hset3, "create handle set3");
1575
1576 ABORT_IF_NOT_OK(abort_test);
1577
1578 uevent_t evt = {
1579 .handle = hset2,
1580 .event = ~0U,
1581 .cookie = NULL,
1582 };
1583
1584 /* add hset2 to hset1 */
1585 rc = handle_set_ctrl(hset1, HSET_ADD, &evt);
1586 EXPECT_EQ(0, rc, "add hset2 to hset1");
1587
1588 /* add hset3 to hset2 */
1589 evt.handle = hset3;
1590 rc = handle_set_ctrl(hset2, HSET_ADD, &evt);
1591 EXPECT_EQ(0, rc, "add hset3 to hset2");
1592
1593 /* add hset1 to hset3 */
1594 evt.handle = hset1;
1595 rc = handle_set_ctrl(hset3, HSET_ADD, &evt);
1596 EXPECT_EQ(ERR_INVALID_ARGS, rc, "add hset1 to hset3");
1597
1598 abort_test:
1599 close(hset2);
1600 close(hset1);
1601 close(hset3);
1602 }
1603
TEST(ipc, hset_add_duplicate) {
1605 int rc;
1606 handle_t hset1;
1607 handle_t hset2;
1608 handle_t hset2_dup;
1609
1610 hset1 = handle_set_create();
1611 EXPECT_GE_ZERO((int)hset1, "create handle set1");
1612
1613 hset2 = handle_set_create();
1614 EXPECT_GE_ZERO((int)hset2, "create handle set2");
1615
1616 hset2_dup = dup(hset2);
1617 EXPECT_GE_ZERO((int)hset2_dup, "duplicate handle set2");
1618
1619 ABORT_IF_NOT_OK(abort_test);
1620
1621 uevent_t evt = {
1622 .handle = hset2,
1623 .event = ~0U,
1624 .cookie = NULL,
1625 };
1626
1627 /* add hset2 to hset1 */
1628 rc = handle_set_ctrl(hset1, HSET_ADD, &evt);
1629 EXPECT_EQ(0, rc, "add hset2 to hset1");
1630
1631 /* add hset2 to hset1 again */
1632 rc = handle_set_ctrl(hset1, HSET_ADD, &evt);
1633 EXPECT_EQ(ERR_ALREADY_EXISTS, rc, "add hset2 to hset1");
1634
1635 /* add duplicate of hset2 to hset1 */
1636 evt.handle = hset2_dup;
1637 rc = handle_set_ctrl(hset1, HSET_ADD, &evt);
1638 EXPECT_EQ(0, rc, "add hset2 duplicate to hset1");
1639
1640 abort_test:
1641 close(hset1);
1642 close(hset2);
1643 close(hset2_dup);
1644 }
1645
TEST(ipc, hset_wait_on_empty_set) {
1647 int rc;
1648 uevent_t evt;
1649 handle_t hset1;
1650
1651 hset1 = handle_set_create();
1652 EXPECT_GE_ZERO((int)hset1, "create hset");
1653
1654 ABORT_IF_NOT_OK(abort_test);
1655
1656 /* wait with zero timeout */
1657 rc = wait(hset1, &evt, 0);
1658 EXPECT_EQ(ERR_NOT_FOUND, rc, "wait on empty hset");
1659
1660 /* wait with non-zero timeout */
1661 rc = wait(hset1, &evt, 100);
1662 EXPECT_EQ(ERR_NOT_FOUND, rc, "wait on empty hset");
1663
1664 close(hset1);
1665
1666 abort_test:;
1667 }
1668
TEST(ipc, hset_wait_on_non_empty_set) {
1670 int rc;
1671 handle_t hset1;
1672 handle_t hset2;
1673
1674 hset1 = handle_set_create();
1675 EXPECT_GE_ZERO((int)hset1, "create handle set1");
1676
1677 hset2 = handle_set_create();
1678 EXPECT_GE_ZERO((int)hset2, "create handle set2");
1679
1680 ABORT_IF_NOT_OK(abort_test);
1681
1682 uevent_t evt = {
1683 .handle = hset2,
1684 .event = ~0U,
1685 .cookie = NULL,
1686 };
1687
1688 /* add hset2 to hset1 */
1689 rc = handle_set_ctrl(hset1, HSET_ADD, &evt);
1690 EXPECT_EQ(0, rc, "add hset2 to hset1");
1691
1692 /* wait with zero timeout on hset1 */
1693 rc = wait(hset1, &evt, 0);
1694 EXPECT_EQ(ERR_TIMED_OUT, rc, "wait on non-empty hset");
1695
1696 /* wait with non-zero timeout on hset1 */
1697 rc = wait(hset1, &evt, 100);
1698 EXPECT_EQ(ERR_TIMED_OUT, rc, "wait on non-empty hset");
1699
1700 abort_test:
1701 close(hset1);
1702 close(hset2);
1703 }
1704
TEST(ipc, hset_wait_on_duplicate_in_set) {
1706 int rc;
1707 handle_t hset1;
1708 handle_t hset2;
1709 handle_t hset2_dup;
1710
1711 hset1 = handle_set_create();
1712 EXPECT_GE_ZERO((int)hset1, "create handle set1");
1713
1714 hset2 = handle_set_create();
1715 EXPECT_GE_ZERO((int)hset2, "create handle set2");
1716
1717 hset2_dup = dup(hset2);
1718 EXPECT_GE_ZERO((int)hset2_dup, "duplicate handle set2");
1719
1720 ABORT_IF_NOT_OK(abort_test);
1721
1722 uevent_t evt = {
1723 .handle = hset2,
1724 .event = ~0U,
1725 .cookie = NULL,
1726 };
1727
1728 /* add hset2 to hset1 */
1729 rc = handle_set_ctrl(hset1, HSET_ADD, &evt);
1730 EXPECT_EQ(NO_ERROR, rc, "add hset2 to hset1");
1731
1732 /* add hset2_dup to hset1 */
1733 evt.handle = hset2_dup;
1734 rc = handle_set_ctrl(hset1, HSET_ADD, &evt);
1735 EXPECT_EQ(NO_ERROR, rc, "add hset2_dup to hset1");
1736
1737 /* wait with zero timeout on hset1 */
1738 rc = wait(hset1, &evt, 0);
1739 EXPECT_EQ(ERR_TIMED_OUT, rc, "wait on non-empty hset");
1740
1741 /* wait with non-zero timeout on hset1 */
1742 rc = wait(hset1, &evt, 100);
1743 EXPECT_EQ(ERR_TIMED_OUT, rc, "wait on non-empty hset");
1744
1745 /* remove handle from handle set */
1746 evt.handle = hset2;
1747 rc = handle_set_ctrl(hset1, HSET_DEL, &evt);
1748 EXPECT_EQ(NO_ERROR, rc, "hset del");
1749
1750 /* wait with zero timeout on hset1 */
1751 rc = wait(hset1, &evt, 0);
1752 EXPECT_EQ(ERR_TIMED_OUT, rc, "wait on non-empty hset");
1753
1754 /* wait with non-zero timeout on hset1 */
1755 rc = wait(hset1, &evt, 100);
1756 EXPECT_EQ(ERR_TIMED_OUT, rc, "wait on non-empty hset");
1757
1758 /* remove duplicate from handle set */
1759 evt.handle = hset2_dup;
1760 rc = handle_set_ctrl(hset1, HSET_DEL, &evt);
1761 EXPECT_EQ(NO_ERROR, rc, "hset del");
1762
1763 /* wait with zero timeout */
1764 rc = wait(hset1, &evt, 0);
1765 EXPECT_EQ(ERR_NOT_FOUND, rc, "wait on empty hset");
1766
1767 /* wait with non-zero timeout */
1768 rc = wait(hset1, &evt, 100);
1769 EXPECT_EQ(ERR_NOT_FOUND, rc, "wait on empty hset");
1770
1771 abort_test:
1772 close(hset1);
1773 close(hset2);
1774 close(hset2_dup);
1775 }
1776
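/*
 * Handle-table exhaustion: once every slot up to MAX_USER_HANDLES is in use,
 * dup() fails with ERR_NO_RESOURCES. After one slot is freed, the next dup()
 * is expected to land in the lowest free slot, i.e. at
 * handle_base + first_free_handle_index, as asserted below.
 */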
1777 TEST(ipc, DISABLED_WITH_COVERAGE(dup_no_resources)) {
1778 int rc;
1779 handle_t hsets[MAX_USER_HANDLES];
1780
1781 /* create maximum number of handle sets */
1782 for (unsigned int i = first_free_handle_index; i < MAX_USER_HANDLES; i++) {
1783 hsets[i] = handle_set_create();
1784 EXPECT_GE_ZERO((int)hsets[i], "create handle set");
1785 }
1786
1787 rc = dup(hsets[first_free_handle_index]);
1788 EXPECT_EQ(ERR_NO_RESOURCES, rc, "no more handles");
1789
1790 /* free 1 handle so we have room and repeat test */
1791 rc = close(hsets[first_free_handle_index]);
1792 EXPECT_EQ(NO_ERROR, rc, "close one set");
1793 hsets[first_free_handle_index] = INVALID_IPC_HANDLE;
1794
1795 /* the only free handle slot here should be at first_free_handle_index */
1796 rc = dup(hsets[first_free_handle_index + 1]);
1797 EXPECT_EQ(handle_base + first_free_handle_index, rc);
1798 close(rc);
1799
1800 /* close them all */
1801 for (unsigned int i = first_free_handle_index + 1; i < MAX_USER_HANDLES;
1802 i++) {
1803 /* close a valid set */
1804 rc = close(hsets[i]);
1805 EXPECT_EQ(NO_ERROR, rc, "close set");
1806 hsets[i] = INVALID_IPC_HANDLE;
1807 }
1808 }
1809
1810 /*
1811 * Disabled per b/140836874 - believed to be a race in the test code, not
1812 * in IPC.
1813 */
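/*
 * This test connects two channels to the echo service, queues a message on
 * each, then registers the channels and a nested handle set with distinct
 * cookies. It checks that a direct wait on a channel reports the channel's
 * own cookie (set via set_cookie), while a wait on a handle set reports the
 * member handle together with the cookie supplied at HSET_ADD time.
 */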
1814 TEST(ipc, DISABLED_hset_add_chan) {
1815 int rc;
1816 uevent_t evt;
1817 handle_t hset1;
1818 handle_t hset2;
1819 handle_t chan1;
1820 handle_t chan2;
1821 void* cookie1 = (void*)"cookie1";
1822 void* cookie2 = (void*)"cookie2";
1823 void* cookie11 = (void*)"cookie11";
1824 void* cookie12 = (void*)"cookie12";
1825 void* cookie21 = (void*)"cookie21";
1826 void* cookie22 = (void*)"cookie22";
1827 void* cookiehs2 = (void*)"cookiehs2";
1828 uint8_t buf0[64];
1829 struct iovec iov;
1830 ipc_msg_t msg;
1831
1832 /* prepare test buffer */
1833 fill_test_buf(buf0, sizeof(buf0), 0x55);
1834
1835 chan1 = connect(SRV_PATH_BASE ".srv.echo", IPC_CONNECT_WAIT_FOR_PORT);
1836 EXPECT_GT_ZERO((int)chan1, "connect to echo chan1");
1837
1838 rc = set_cookie(chan1, cookie1);
1839 EXPECT_EQ(0, rc, "cookie1");
1840
1841 chan2 = connect(SRV_PATH_BASE ".srv.echo", IPC_CONNECT_WAIT_FOR_PORT);
1842 EXPECT_GT_ZERO((int)chan2, "connect to echo chan2");
1843
1844 rc = set_cookie(chan2, cookie2);
1845 EXPECT_EQ(0, rc, "cookie2");
1846
1847 /* send message over chan1 and chan2 */
1848 iov.iov_base = buf0;
1849 iov.iov_len = sizeof(buf0);
1850 msg.num_handles = 0;
1851 msg.handles = NULL;
1852 msg.num_iov = 1;
1853 msg.iov = &iov;
1854
1855 rc = send_msg(chan1, &msg);
1856 EXPECT_EQ(64, rc, "send over chan1");
1857
1858 rc = send_msg(chan2, &msg);
1859 EXPECT_EQ(64, rc, "send over chan2");
1860
1861 hset1 = handle_set_create();
1862 EXPECT_GE_ZERO((int)hset1, "create handle set1");
1863
1864 hset2 = handle_set_create();
1865 EXPECT_GE_ZERO((int)hset2, "create handle set2");
1866
1867 ABORT_IF_NOT_OK(abort_test);
1868
1869 /* chan1 to hset2 */
1870 evt.handle = chan1;
1871 evt.event = ~0U;
1872 evt.cookie = cookie12;
1873 rc = handle_set_ctrl(hset2, HSET_ADD, &evt);
1874 EXPECT_EQ(0, rc, "add hset2 to hset1");
1875
1876 /* chan2 to hset2 */
1877 evt.handle = chan2;
1878 evt.event = ~0U;
1879 evt.cookie = cookie22;
1880 rc = handle_set_ctrl(hset2, HSET_ADD, &evt);
1881 EXPECT_EQ(0, rc, "add hset2 to hset1");
1882
1883 /* add hset2 to hset1 */
1884 evt.handle = hset2;
1885 evt.event = ~0U;
1886 evt.cookie = cookiehs2;
1887 rc = handle_set_ctrl(hset1, HSET_ADD, &evt);
1888 EXPECT_EQ(0, rc, "add hset2 to hset1");
1889
1890 /* chan1 to hset1 */
1891 evt.handle = chan1;
1892 evt.event = ~0U;
1893 evt.cookie = cookie11;
1894 rc = handle_set_ctrl(hset1, HSET_ADD, &evt);
1895 EXPECT_EQ(0, rc, "add hset2 to hset1");
1896
1897 /* chan2 to hset1 */
1898 evt.handle = chan2;
1899 evt.event = ~0U;
1900 evt.cookie = cookie21;
1901 rc = handle_set_ctrl(hset1, HSET_ADD, &evt);
1902 EXPECT_EQ(0, rc, "add hset2 to hset1");
1903
1904 /* wait on chan1 directly */
1905 rc = wait(chan1, &evt, 1000);
1906 EXPECT_EQ(0, rc, "wait on chan1");
1907 EXPECT_EQ(chan1, evt.handle, "event.handle");
1908 EXPECT_EQ(cookie1, evt.cookie, "event.cookie");
1909
1910 /* wait on chan2 directly */
1911 rc = wait(chan2, &evt, 1000);
1912 EXPECT_EQ(0, rc, "wait on chan2");
1913 EXPECT_EQ(chan2, evt.handle, "event.handle");
1914 EXPECT_EQ(cookie2, evt.cookie, "event.cookie");
1915
1916 /* wait on hset1 */
1917 rc = wait(hset1, &evt, 1000);
1918 EXPECT_EQ(0, rc, "wait on hset1");
1919 EXPECT_EQ(hset2, evt.handle, "event.handle");
1920 EXPECT_EQ(cookiehs2, evt.cookie, "event.cookie");
1921
1922 /* wait on hset1 again (chan1 should be ready) */
1923 rc = wait(hset1, &evt, 1000);
1924 EXPECT_EQ(0, rc, "wait on hset1");
1925 EXPECT_EQ(chan1, evt.handle, "event.handle");
1926 EXPECT_EQ(cookie11, evt.cookie, "event.cookie");
1927
1928 /* wait on hset1 again (chan2 should be ready) */
1929 rc = wait(hset1, &evt, 1000);
1930 EXPECT_EQ(0, rc, "wait on hset1");
1931 EXPECT_EQ(chan2, evt.handle, "event.handle");
1932 EXPECT_EQ(cookie21, evt.cookie, "event.cookie");
1933
1934 /* wait on hset1 again (hset2 should be ready) */
1935 rc = wait(hset1, &evt, 1000);
1936 EXPECT_EQ(0, rc, "wait on hset1");
1937 EXPECT_EQ(hset2, evt.handle, "event.handle");
1938 EXPECT_EQ(cookiehs2, evt.cookie, "event.cookie");
1939
1940 /* wait on hset2 (chan1 should be ready) */
1941 rc = wait(hset2, &evt, 1000);
1942 EXPECT_EQ(0, rc, "wait on hset2");
1943 EXPECT_EQ(chan1, evt.handle, "event.handle");
1944 EXPECT_EQ(cookie12, evt.cookie, "event.cookie");
1945
1946 /* wait on hset2 again (chan2 should be ready) */
1947 rc = wait(hset2, &evt, 1000);
1948 EXPECT_EQ(0, rc, "wait on hset2");
1949 EXPECT_EQ(chan2, evt.handle, "event.handle");
1950 EXPECT_EQ(cookie22, evt.cookie, "event.cookie");
1951
1952 /* wait on hset2 again (chan1 should be ready) */
1953 rc = wait(hset2, &evt, 1000);
1954 EXPECT_EQ(0, rc, "wait on chan1");
1955 EXPECT_EQ(chan1, evt.handle, "event.handle");
1956 EXPECT_EQ(cookie12, evt.cookie, "event.cookie");
1957
1958 abort_test:
1959 close(chan1);
1960 close(chan2);
1961 close(hset1);
1962 close(hset2);
1963 }
1964
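/*
 * Event-mask control: HSET_MOD with evt.event set to 0 is expected to mask a
 * member out (waits on the set time out even though the member has a pending
 * message), and restoring the mask to ~0U makes it reported again.
 */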
1965 TEST(ipc, hset_event_mask) {
1966 int rc;
1967 uevent_t evt;
1968 handle_t hset1;
1969 handle_t chan1;
1970 void* cookie1 = (void*)"cookie1";
1971 void* cookie11 = (void*)"cookie11";
1972 uint8_t buf0[64];
1973 struct iovec iov;
1974 ipc_msg_t msg;
1975
1976 /* prepare test buffer */
1977 fill_test_buf(buf0, sizeof(buf0), 0x55);
1978
1979 chan1 = connect(SRV_PATH_BASE ".srv.echo", IPC_CONNECT_WAIT_FOR_PORT);
1980 EXPECT_GT_ZERO((int)chan1, "connect to echo");
1981
1982 rc = set_cookie(chan1, cookie1);
1983 EXPECT_EQ(0, rc, "cookie1");
1984
1985 /* send message over chan1 */
1986 iov.iov_base = buf0;
1987 iov.iov_len = sizeof(buf0);
1988 msg.num_handles = 0;
1989 msg.handles = NULL;
1990 msg.num_iov = 1;
1991 msg.iov = &iov;
1992
1993 rc = send_msg(chan1, &msg);
1994 EXPECT_EQ(64, rc, "send over chan1");
1995
1996 hset1 = handle_set_create();
1997 EXPECT_GE_ZERO((int)hset1, "create handle set1");
1998
1999 ABORT_IF_NOT_OK(abort_test);
2000
2001 /* chan1 to hset1 */
2002 evt.handle = chan1;
2003 evt.event = ~0U;
2004 evt.cookie = cookie11;
2005 rc = handle_set_ctrl(hset1, HSET_ADD, &evt);
2006 EXPECT_EQ(0, rc, "add chan1 to hset1");
2007
2008 /* wait on chan1 handle */
2009 rc = wait(chan1, &evt, 100);
2010 EXPECT_EQ(0, rc, "wait on chan1");
2011 EXPECT_EQ(chan1, evt.handle, "event.handle");
2012 EXPECT_EQ(cookie1, evt.cookie, "event.cookie");
2013
2014 /* wait on hset1 (should get chan1) */
2015 rc = wait(hset1, &evt, 100);
2016 EXPECT_EQ(0, rc, "wait on hset1");
2017 EXPECT_EQ(chan1, evt.handle, "event.handle");
2018 EXPECT_EQ(cookie11, evt.cookie, "event.cookie");
2019
2020 /* mask off chan1 in hset1 */
2021 evt.handle = chan1;
2022 evt.event = 0;
2023 evt.cookie = cookie11;
2024 rc = handle_set_ctrl(hset1, HSET_MOD, &evt);
2025 EXPECT_EQ(0, rc, "mod chan1 in hset1");
2026
2027 /* wait on hset1 (should time out, chan1 is masked off) */
2028 rc = wait(hset1, &evt, 100);
2029 EXPECT_EQ(ERR_TIMED_OUT, rc, "wait on hset1");
2030
2031 /* unmask chan1 in hset1 */
2032 evt.handle = chan1;
2033 evt.event = ~0U;
2034 evt.cookie = cookie11;
2035 rc = handle_set_ctrl(hset1, HSET_MOD, &evt);
2036 EXPECT_EQ(0, rc, "mod chan1 in hset1");
2037
2038 /* wait on hset1 (should get chan1) */
2039 rc = wait(hset1, &evt, 100);
2040 EXPECT_EQ(0, rc, "wait on hset1");
2041 EXPECT_EQ(chan1, evt.handle, "event.handle");
2042 EXPECT_EQ(cookie11, evt.cookie, "event.cookie");
2043
2044 abort_test:
2045 close(chan1);
2046 close(hset1);
2047 }
2048
2049 /****************************************************************************/
2050
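/*
 * Handle transfer: a handle is attached to an outgoing message through
 * msg.handles/msg.num_handles. As exercised below, the sender keeps its own
 * copy and may close it after send_msg() returns; the receiving side gets an
 * independent reference (see recv_handle below).
 */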
2051 TEST(ipc, send_handle) {
2052 int rc;
2053 struct iovec iov;
2054 ipc_msg_t msg;
2055 handle_t hchan1;
2056 handle_t hchan2;
2057 uint8_t buf0[64];
2058 char path[MAX_PORT_PATH_LEN];
2059
2060 /* prepare test buffer */
2061 fill_test_buf(buf0, sizeof(buf0), 0x55);
2062
2063 /* open connection to datasink service */
2064 sprintf(path, "%s.srv.%s", SRV_PATH_BASE, "datasink");
2065 rc = connect(path, IPC_CONNECT_WAIT_FOR_PORT);
2066 EXPECT_GT_ZERO(rc, "connect to datasink");
2067 ABORT_IF_NOT_OK(err_connect1);
2068 hchan1 = (handle_t)rc;
2069
2070 rc = connect(path, IPC_CONNECT_WAIT_FOR_PORT);
2071 EXPECT_GT_ZERO(rc, "connect to datasink");
2072 ABORT_IF_NOT_OK(err_connect2);
2073 hchan2 = (handle_t)rc;
2074
2075 /* send hchan2 handle over hchan1 connection */
2076 iov.iov_base = buf0;
2077 iov.iov_len = sizeof(buf0);
2078 msg.iov = &iov;
2079 msg.num_iov = 1;
2080 msg.handles = &hchan2;
2081 msg.num_handles = 1;
2082
2083 /* send and wait a bit */
2084 rc = send_msg(hchan1, &msg);
2085 EXPECT_EQ(64, rc, "send handle");
2086 trusty_nanosleep(0, 0, 100 * MSEC);
2087
2088 /* send it again and close it */
2089 rc = send_msg(hchan1, &msg);
2090 EXPECT_EQ(64, rc, "send handle");
2091 rc = close(hchan2);
2092 EXPECT_EQ(NO_ERROR, rc, "close chan2");
2093
2094 err_connect2:
2095 rc = close(hchan1);
2096 EXPECT_EQ(NO_ERROR, rc, "close chan1");
2097 err_connect1:;
2098 }
2099
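/*
 * Negative handle-transfer cases exercised below: up to 8 handles may be
 * attached to one message, a NULL or otherwise invalid handles pointer with
 * a non-zero num_handles fails with ERR_FAULT, and attaching more than 8
 * handles fails with ERR_TOO_BIG.
 */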
2100 TEST(ipc, send_handle_negative) {
2101 int rc;
2102 ipc_msg_t msg;
2103 handle_t hchan;
2104 handle_t hsend[10];
2105 char path[MAX_PORT_PATH_LEN];
2106
2107 /* open connection to datasink service */
2108 sprintf(path, "%s.srv.%s", SRV_PATH_BASE, "datasink");
2109 rc = connect(path, IPC_CONNECT_WAIT_FOR_PORT);
2110 EXPECT_GT_ZERO(rc, "connect to datasink");
2111 ABORT_IF_NOT_OK(err_connect);
2112 hchan = (handle_t)rc;
2113
2114 for (unsigned int i = 0; i < countof(hsend); i++)
2115 hsend[i] = hchan;
2116
2117 /* send 8 copies of the channel handle to datasink (should be fine) */
2118 msg.iov = NULL;
2119 msg.num_iov = 0;
2120 msg.handles = &hsend[0];
2121 msg.num_handles = 8;
2122 rc = send_msg(hchan, &msg);
2123 EXPECT_EQ(0, rc, "send handle");
2124
2125 /* send 8 handles to datasink with the handles pointer set to NULL */
2126 msg.iov = NULL;
2127 msg.num_iov = 0;
2128 msg.handles = NULL;
2129 msg.num_handles = 8;
2130 rc = send_msg(hchan, &msg);
2131 EXPECT_EQ(ERR_FAULT, rc, "send handle");
2132
2133 /* call with an invalid handles pointer should return ERR_FAULT */
2134 msg.handles = (handle_t*)0x100;
2135 msg.num_handles = 8;
2136 rc = send_msg(hchan, &msg);
2137 EXPECT_EQ(ERR_FAULT, rc, "send handle");
2138
2139 /* send more than 8, should fail */
2140 msg.handles = &hsend[0];
2141 msg.num_handles = 10;
2142 rc = send_msg(hchan, &msg);
2143 EXPECT_EQ(ERR_TOO_BIG, rc, "send handle");
2144
2145 rc = close(hchan);
2146 EXPECT_EQ(NO_ERROR, rc, "close chan");
2147 err_connect:;
2148 }
2149
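/*
 * Receiving handles: get_msg() reports how many handles a message carries,
 * and read_msg() only retrieves as many as msg.num_handles asks for. As
 * asserted below, a read with num_handles == 0 leaves the attached handle
 * unretrieved, and each read that does request it installs a fresh local
 * handle that must be closed separately.
 */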
2150 TEST(ipc, recv_handle) {
2151 int rc;
2152 handle_t hchan1;
2153 handle_t hchan2;
2154 handle_t hrecv[2];
2155 uint8_t buf0[64];
2156 struct iovec iov;
2157 ipc_msg_t msg;
2158 uevent_t evt;
2159 ipc_msg_info_t inf;
2160 char path[MAX_PORT_PATH_LEN];
2161
2162 /* prepare test buffer */
2163 fill_test_buf(buf0, sizeof(buf0), 0x55);
2164
2165 /* open connection to echo service */
2166 sprintf(path, "%s.srv.%s", SRV_PATH_BASE, "echo");
2167 rc = connect(path, IPC_CONNECT_WAIT_FOR_PORT);
2168 EXPECT_GT_ZERO(rc, "connect to echo");
2169 ABORT_IF_NOT_OK(err_connect1);
2170 hchan1 = (handle_t)rc;
2171
2172 /* open second connection to echo service */
2173 sprintf(path, "%s.srv.%s", SRV_PATH_BASE, "echo");
2174 rc = connect(path, IPC_CONNECT_WAIT_FOR_PORT);
2175 EXPECT_GT_ZERO(rc, "connect to echo");
2176 ABORT_IF_NOT_OK(err_connect2);
2177 hchan2 = (handle_t)rc;
2178
2179 /* send message with handle */
2180 iov.iov_base = buf0;
2181 iov.iov_len = sizeof(buf0);
2182 msg.iov = &iov;
2183 msg.num_iov = 1;
2184 msg.handles = &hchan2;
2185 msg.num_handles = 1;
2186
2187 rc = send_msg(hchan1, &msg);
2188 EXPECT_EQ(64, rc, "send_handle");
2189
2190 /* wait for reply */
2191 rc = wait(hchan1, &evt, 1000);
2192 EXPECT_EQ(0, rc, "wait for reply");
2193 EXPECT_EQ(hchan1, evt.handle, "event.handle");
2194
2195 /* get reply message */
2196 rc = get_msg(hchan1, &inf);
2197 EXPECT_EQ(NO_ERROR, rc, "getting echo reply");
2198 EXPECT_EQ(sizeof(buf0), inf.len, "reply len");
2199 EXPECT_EQ(1, inf.num_handles, "reply num_handles");
2200
2201 /* read reply data and no handles */
2202 hrecv[0] = INVALID_IPC_HANDLE;
2203 hrecv[1] = INVALID_IPC_HANDLE;
2204 msg.handles = &hrecv[0];
2205 msg.num_handles = 0;
2206 rc = read_msg(hchan1, inf.id, 0, &msg);
2207 EXPECT_EQ(64, rc, "reading echo reply");
2208
2209 rc = close(hrecv[0]);
2210 EXPECT_EQ(ERR_BAD_HANDLE, rc, "close reply handle");
2211
2212 rc = close(hrecv[1]);
2213 EXPECT_EQ(ERR_BAD_HANDLE, rc, "close reply handle");
2214
2215 /* read reply data and 1 handle */
2216 hrecv[0] = INVALID_IPC_HANDLE;
2217 hrecv[1] = INVALID_IPC_HANDLE;
2218 msg.handles = &hrecv[0];
2219 msg.num_handles = 1;
2220 rc = read_msg(hchan1, inf.id, 0, &msg);
2221 EXPECT_EQ(64, rc, "reading echo reply");
2222
2223 rc = close(hrecv[0]);
2224 EXPECT_EQ(0, rc, "close reply handle");
2225
2226 rc = close(hrecv[1]);
2227 EXPECT_EQ(ERR_BAD_HANDLE, rc, "close reply handle");
2228
2229 /* read reply data and 2 handles (second one should be invalid) */
2230 hrecv[0] = INVALID_IPC_HANDLE;
2231 hrecv[1] = INVALID_IPC_HANDLE;
2232 msg.handles = &hrecv[0];
2233 msg.num_handles = 2;
2234 rc = read_msg(hchan1, inf.id, 0, &msg);
2235 EXPECT_EQ(64, rc, "reading echo reply");
2236
2237 rc = close(hrecv[0]);
2238 EXPECT_EQ(0, rc, "close reply handle");
2239
2240 rc = close(hrecv[1]);
2241 EXPECT_EQ(ERR_BAD_HANDLE, rc, "close reply handle");
2242
2243 /* read 1 handle with no data */
2244 hrecv[0] = INVALID_IPC_HANDLE;
2245 hrecv[1] = INVALID_IPC_HANDLE;
2246 msg.num_iov = 0;
2247 msg.handles = &hrecv[0];
2248 msg.num_handles = 1;
2249 rc = read_msg(hchan1, inf.id, 0, &msg);
2250 EXPECT_EQ(0, rc, "reading echo reply");
2251
2252 EXPECT_EQ(INVALID_IPC_HANDLE, hrecv[1], "reading echo reply");
2253
2254 /* read same handle for the second time */
2255 msg.handles = &hrecv[1];
2256 msg.num_handles = 1;
2257 rc = read_msg(hchan1, inf.id, 0, &msg);
2258 EXPECT_EQ(0, rc, "reading echo reply");
2259
2260 rc = close(hrecv[0]);
2261 EXPECT_EQ(0, rc, "close reply handle");
2262
2263 rc = close(hrecv[1]);
2264 EXPECT_EQ(0, rc, "close reply handle");
2265
2266 /* discard reply */
2267 rc = put_msg(hchan1, inf.id);
2268 EXPECT_EQ(NO_ERROR, rc, "putting echo reply");
2269
2270 rc = close(hchan2);
2271 EXPECT_EQ(NO_ERROR, rc, "close chan2");
2272 err_connect2:
2273 rc = close(hchan1);
2274 EXPECT_EQ(NO_ERROR, rc, "close chan1");
2275 err_connect1:;
2276 }
2277
2278 TEST(ipc, recv_handle_negative) {
2279 int rc;
2280 handle_t hchan1;
2281 ipc_msg_t msg;
2282 uevent_t evt;
2283 ipc_msg_info_t inf;
2284 char path[MAX_PORT_PATH_LEN];
2285
2286 /* open connection to echo service */
2287 sprintf(path, "%s.srv.%s", SRV_PATH_BASE, "echo");
2288 rc = connect(path, IPC_CONNECT_WAIT_FOR_PORT);
2289 EXPECT_GT_ZERO(rc, "connect to echo");
2290 ABORT_IF_NOT_OK(err_connect1);
2291 hchan1 = (handle_t)rc;
2292
2293 /* send message with handle attached */
2294 msg.iov = NULL;
2295 msg.num_iov = 0;
2296 msg.handles = &hchan1;
2297 msg.num_handles = 1;
2298
2299 rc = send_msg(hchan1, &msg);
2300 EXPECT_EQ(0, rc, "send_handle");
2301
2302 /* wait for reply */
2303 rc = wait(hchan1, &evt, 1000);
2304 EXPECT_EQ(0, rc, "wait for reply");
2305 EXPECT_EQ(hchan1, evt.handle, "event.handle");
2306
2307 /* get reply message */
2308 rc = get_msg(hchan1, &inf);
2309 EXPECT_EQ(NO_ERROR, rc, "getting echo reply");
2310 EXPECT_EQ(0, inf.len, "reply len");
2311 EXPECT_EQ(1, inf.num_handles, "reply num_handles");
2312
2313 /* read reply data with handles pointing to NULL */
2314 msg.handles = NULL;
2315 msg.num_handles = 1;
2316 rc = read_msg(hchan1, inf.id, 0, &msg);
2317 EXPECT_EQ(ERR_FAULT, rc, "reading echo reply");
2318
2319 /* read reply data and bad handle ptr */
2320 msg.handles = (handle_t*)0x100;
2321 msg.num_handles = 1;
2322 rc = read_msg(hchan1, inf.id, 0, &msg);
2323 EXPECT_EQ(ERR_FAULT, rc, "reading echo reply");
2324
2325 /* discard reply */
2326 rc = put_msg(hchan1, inf.id);
2327 EXPECT_EQ(NO_ERROR, rc, "putting echo reply");
2328
2329 rc = close(hchan1);
2330 EXPECT_EQ(NO_ERROR, rc, "close chan1");
2331 err_connect1:;
2332 }
2333
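/*
 * Bulk handle transfer with back-pressure: when the channel's send queue is
 * full, send_msg() returns ERR_NOT_ENOUGH_BUFFER and the sender waits for
 * the IPC_HANDLE_POLL_SEND_UNBLOCKED event before retrying, as shown in the
 * retry loops below.
 */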
2334 TEST(ipc, send_handle_bulk) {
2335 int rc;
2336 struct iovec iov;
2337 ipc_msg_t msg;
2338 handle_t hchan1;
2339 handle_t hchan2;
2340 uint8_t buf0[64];
2341 char path[MAX_PORT_PATH_LEN];
2342
2343 /* prepare test buffer */
2344 fill_test_buf(buf0, sizeof(buf0), 0x55);
2345
2346 /* open connection to datasink service */
2347 sprintf(path, "%s.srv.%s", SRV_PATH_BASE, "datasink");
2348 rc = connect(path, IPC_CONNECT_WAIT_FOR_PORT);
2349 EXPECT_GT_ZERO(rc, "connect to datasink");
2350 ABORT_IF_NOT_OK(err_connect1);
2351 hchan1 = (handle_t)rc;
2352
2353 rc = connect(path, IPC_CONNECT_WAIT_FOR_PORT);
2354 EXPECT_GT_ZERO(rc, "connect to datasink");
2355 ABORT_IF_NOT_OK(err_connect2);
2356 hchan2 = (handle_t)rc;
2357
2358 /* send hchan2 handle over hchan1 connection */
2359 iov.iov_base = buf0;
2360 iov.iov_len = sizeof(buf0);
2361 msg.iov = &iov;
2362 msg.num_iov = 1;
2363 msg.handles = &hchan2;
2364 msg.num_handles = 1;
2365
2366 for (unsigned int i = 0; (i < 10000) && !HasFailure(); i++) {
2367 while (!HasFailure()) {
2368 rc = send_msg(hchan1, &msg);
2369 if (rc == ERR_NOT_ENOUGH_BUFFER) { /* wait for room */
2370 uevent_t uevt;
2371 unsigned int exp_event = IPC_HANDLE_POLL_SEND_UNBLOCKED;
2372 rc = wait(hchan1, &uevt, 10000);
2373 EXPECT_EQ(NO_ERROR, rc, "waiting for space");
2374 EXPECT_EQ(hchan1, uevt.handle, "waiting for space");
2375 EXPECT_EQ(exp_event, uevt.event, "waiting for space");
2376 } else {
2377 EXPECT_EQ(64, rc, "send_msg bulk");
2378 break;
2379 }
2380 }
2381 }
2382 rc = close(hchan2);
2383 EXPECT_EQ(NO_ERROR, rc, "close chan2");
2384
2385 /* repeat the same while closing handle after sending it */
2386 for (unsigned int i = 0; (i < 10000) && !HasFailure(); i++) {
2387 rc = connect(path, IPC_CONNECT_WAIT_FOR_PORT);
2388 EXPECT_GT_ZERO(rc, "connect to datasink");
2389 ABORT_IF_NOT_OK(err_connect2);
2390 hchan2 = (handle_t)rc;
2391
2392 /* send hchan2 handle over hchan1 connection */
2393 iov.iov_base = buf0;
2394 iov.iov_len = sizeof(buf0);
2395 msg.iov = &iov;
2396 msg.num_iov = 1;
2397 msg.handles = &hchan2;
2398 msg.num_handles = 1;
2399
2400 while (!HasFailure()) {
2401 rc = send_msg(hchan1, &msg);
2402 if (rc == ERR_NOT_ENOUGH_BUFFER) { /* wait for room */
2403 uevent_t uevt;
2404 unsigned int exp_event = IPC_HANDLE_POLL_SEND_UNBLOCKED;
2405 rc = wait(hchan1, &uevt, 10000);
2406 EXPECT_EQ(NO_ERROR, rc, "waiting for space");
2407 EXPECT_EQ(hchan1, uevt.handle, "waiting for space");
2408 EXPECT_EQ(exp_event, uevt.event, "waiting for space");
2409 } else {
2410 EXPECT_EQ(64, rc, "send_msg bulk");
2411 break;
2412 }
2413 }
2414 rc = close(hchan2);
2415 EXPECT_EQ(NO_ERROR, rc, "close chan2");
2416 }
2417
2418 err_connect2:
2419 rc = close(hchan1);
2420 EXPECT_EQ(NO_ERROR, rc, "close chan1");
2421 err_connect1:;
2422 }
2423
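/*
 * Round-trips the same handle through the echo service many times, closing
 * each received copy, to stress the handle-transfer path in both directions.
 */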
2424 TEST(ipc, echo_handle_bulk) {
2425 int rc;
2426 handle_t hchan1;
2427 handle_t hchan2;
2428 handle_t hrecv;
2429 uint8_t buf0[64];
2430 struct iovec iov;
2431 ipc_msg_t msg;
2432 uevent_t evt;
2433 ipc_msg_info_t inf;
2434 char path[MAX_PORT_PATH_LEN];
2435
2436 /* prepare test buffer */
2437 fill_test_buf(buf0, sizeof(buf0), 0x55);
2438
2439 /* open connection to echo service */
2440 sprintf(path, "%s.srv.%s", SRV_PATH_BASE, "echo");
2441 rc = connect(path, IPC_CONNECT_WAIT_FOR_PORT);
2442 EXPECT_GT_ZERO(rc, "connect to echo");
2443 ABORT_IF_NOT_OK(err_connect1);
2444 hchan1 = (handle_t)rc;
2445
2446 /* open second connection to echo service */
2447 sprintf(path, "%s.srv.%s", SRV_PATH_BASE, "echo");
2448 rc = connect(path, IPC_CONNECT_WAIT_FOR_PORT);
2449 EXPECT_GT_ZERO(rc, "connect to echo");
2450 ABORT_IF_NOT_OK(err_connect2);
2451 hchan2 = (handle_t)rc;
2452
2453 /* send the same handle 10000 times */
2454 for (unsigned int i = 0; (i < 10000) && !HasFailure(); i++) {
2455 /* send message with handle */
2456 iov.iov_base = buf0;
2457 iov.iov_len = sizeof(buf0);
2458 msg.iov = &iov;
2459 msg.num_iov = 1;
2460 msg.handles = &hchan2;
2461 msg.num_handles = 1;
2462
2463 while (!HasFailure()) {
2464 rc = send_msg(hchan1, &msg);
2465 EXPECT_EQ(64, rc, "send_handle");
2466 if (rc == ERR_NOT_ENOUGH_BUFFER) { /* wait for room */
2467 uevent_t uevt;
2468 unsigned int exp_event = IPC_HANDLE_POLL_SEND_UNBLOCKED;
2469 rc = wait(hchan1, &uevt, 10000);
2470 EXPECT_EQ(NO_ERROR, rc, "waiting for space");
2471 EXPECT_EQ(hchan1, uevt.handle, "waiting for space");
2472 EXPECT_EQ(exp_event, uevt.event, "waiting for space");
2473 } else {
2474 EXPECT_EQ(64, rc, "send_msg bulk");
2475 break;
2476 }
2477 }
2478
2479 /* wait for reply */
2480 rc = wait(hchan1, &evt, 1000);
2481 EXPECT_EQ(0, rc, "wait for reply");
2482 EXPECT_EQ(hchan1, evt.handle, "event.handle");
2483
2484 /* get reply message */
2485 rc = get_msg(hchan1, &inf);
2486 EXPECT_EQ(NO_ERROR, rc, "getting echo reply");
2487 EXPECT_EQ(sizeof(buf0), inf.len, "reply len");
2488 EXPECT_EQ(1, inf.num_handles, "reply num_handles");
2489
2490 /* read reply data and 1 handle */
2491 hrecv = INVALID_IPC_HANDLE;
2492 msg.handles = &hrecv;
2493 msg.num_handles = 1;
2494 rc = read_msg(hchan1, inf.id, 0, &msg);
2495 EXPECT_EQ(64, rc, "reading echo reply");
2496
2497 /* discard reply */
2498 rc = put_msg(hchan1, inf.id);
2499 EXPECT_EQ(NO_ERROR, rc, "putting echo reply");
2500
2501 /* close received handle */
2502 rc = close(hrecv);
2503 EXPECT_EQ(0, rc, "close reply handle");
2504 }
2505
2506 rc = close(hchan2);
2507 EXPECT_EQ(NO_ERROR, rc, "close chan2");
2508 err_connect2:
2509 rc = close(hchan1);
2510 EXPECT_EQ(NO_ERROR, rc, "close chan1");
2511 err_connect1:;
2512 }
2513
2514 /*
2515 * TIPC wrapper library tests
2516 */
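/*
 * These tests cover the lib/tipc convenience wrappers over the raw syscalls:
 * tipc_connect() opens a channel and only stores the handle on success,
 * tipc_send1()/tipc_send2() send one or two buffers as a single message, and
 * tipc_recv1()/tipc_recv_hdr_payload() read a reply and fail with
 * ERR_BAD_LEN when it does not match the expected sizes, as asserted below.
 */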
2517 TEST(ipc, tipc_connect) {
2518 int rc;
2519 handle_t h = INVALID_IPC_HANDLE;
2520 char path[MAX_PORT_PATH_LEN];
2521
2522 /* Make tipc_connect fail and check if handle is unchanged */
2523 rc = tipc_connect(&h, NULL);
2524 EXPECT_EQ(ERR_FAULT, rc, "failed tipc_connect");
2525 EXPECT_EQ(INVALID_IPC_HANDLE, h, "failed tipc_connect")
2526
2527 rc = close(h);
2528 EXPECT_EQ(ERR_BAD_HANDLE, rc, "close failed tipc_connect");
2529
2530 /* Normal case: should succeed */
2531 sprintf(path, "%s.srv.%s", SRV_PATH_BASE, "echo");
2532 rc = tipc_connect(&h, path);
2533 EXPECT_EQ(0, rc, "tipc_connect");
2534 EXPECT_GT_ZERO(h, "tipc_connect");
2535
2536 rc = close(h);
2537 EXPECT_EQ(0, rc, "close tipc_connect");
2538 }
2539
2540 TEST(ipc, tipc_send_recv_1) {
2541 int rc;
2542 uint8_t tx_buf[64];
2543 uint8_t rx_buf[64];
2544 char path[MAX_PORT_PATH_LEN];
2545 handle_t h = INVALID_IPC_HANDLE;
2546 struct uevent evt = UEVENT_INITIAL_VALUE(evt);
2547
2548 /* open connection to echo service */
2549 sprintf(path, "%s.srv.%s", SRV_PATH_BASE, "echo");
2550 rc = tipc_connect(&h, path);
2551 EXPECT_EQ(0, rc, "tipc_connect");
2552 EXPECT_GT_ZERO(h, "tipc_connect");
2553
2554 /* send/receive zero length message */
2555 rc = tipc_send1(h, NULL, 0);
2556 EXPECT_EQ(0, rc, "tipc_send_one");
2557
2558 rc = wait(h, &evt, INFINITE_TIME);
2559 EXPECT_EQ(0, rc, "wait for reply");
2560
2561 rc = tipc_recv1(h, 0, rx_buf, sizeof(rx_buf));
2562 EXPECT_EQ(0, rc, "tipc_recv_one");
2563
2564 /* send/receive normal message: should succeed */
2565 memset(tx_buf, 0x55, sizeof(tx_buf));
2566 memset(rx_buf, 0xaa, sizeof(rx_buf));
2567
2568 rc = tipc_send1(h, tx_buf, sizeof(tx_buf) / 2);
2569 EXPECT_EQ(sizeof(tx_buf) / 2, (size_t)rc, "tipc_send1");
2570
2571 rc = wait(h, &evt, INFINITE_TIME);
2572 EXPECT_EQ(0, rc, "wait for reply");
2573
2574 rc = tipc_recv1(h, 0, rx_buf, sizeof(rx_buf));
2575 EXPECT_EQ(sizeof(tx_buf) / 2, (size_t)rc, "tipc_recv1");
2576
2577 rc = memcmp(tx_buf, rx_buf, sizeof(tx_buf) / 2);
2578 EXPECT_EQ(0, rc, "memcmp");
2579
2580 /*
2581 * send and then receive into a buffer that is too small: the call
2582 * should fail and the message should be discarded
2583 */
2584 memset(tx_buf, 0x55, sizeof(tx_buf));
2585 memset(rx_buf, 0xaa, sizeof(rx_buf));
2586
2587 rc = tipc_send1(h, tx_buf, sizeof(tx_buf));
2588 EXPECT_EQ(sizeof(tx_buf), (size_t)rc, "tipc_send1");
2589
2590 rc = wait(h, &evt, INFINITE_TIME);
2591 EXPECT_EQ(0, rc, "wait for reply");
2592
2593 rc = tipc_recv1(h, 0, rx_buf, sizeof(rx_buf) / 2);
2594 EXPECT_EQ(ERR_BAD_LEN, rc, "tipc_recv1");
2595
2596 /*
2597 * send and then receive a message shorter than the minimum length: should fail
2598 */
2599 memset(tx_buf, 0x55, sizeof(tx_buf));
2600 memset(rx_buf, 0xaa, sizeof(rx_buf));
2601
2602 rc = tipc_send1(h, tx_buf, sizeof(tx_buf) / 2);
2603 EXPECT_EQ(sizeof(tx_buf) / 2, (size_t)rc, "tipc_send1");
2604
2605 rc = wait(h, &evt, INFINITE_TIME);
2606 EXPECT_EQ(0, rc, "wait for reply");
2607
2608 rc = tipc_recv1(h, sizeof(rx_buf), rx_buf, sizeof(rx_buf));
2609 EXPECT_EQ(ERR_BAD_LEN, rc, "tipc_recv1");
2610
2611 /* clean up */
2612 rc = close(h);
2613 EXPECT_EQ(0, rc, "close tipc_connect");
2614 }
2615
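/*
 * Header/payload variant: tipc_send2() concatenates the header and payload
 * buffers into one message, and tipc_recv_hdr_payload() expects the reply to
 * contain at least a full header and to fit within the supplied buffers,
 * failing with ERR_BAD_LEN otherwise, as asserted below.
 */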
2616 TEST(ipc, tipc_send_recv_hdr_payload) {
2617 int rc;
2618 uint8_t tx_hdr[32];
2619 uint8_t tx_buf[64];
2620 uint8_t rx_hdr[32];
2621 uint8_t rx_buf[64];
2622 char path[MAX_PORT_PATH_LEN];
2623 handle_t h = INVALID_IPC_HANDLE;
2624 struct uevent evt = UEVENT_INITIAL_VALUE(evt);
2625
2626 sprintf(path, "%s.srv.%s", SRV_PATH_BASE, "echo");
2627 rc = tipc_connect(&h, path);
2628 EXPECT_EQ(0, rc, "tipc_connect");
2629 EXPECT_GT_ZERO(h, "tipc_connect");
2630
2631 /* send/receive normal message: should succeed */
2632 memset(tx_hdr, 0xaa, sizeof(tx_hdr));
2633 memset(tx_buf, 0x55, sizeof(tx_buf));
2634 memset(rx_hdr, 0x55, sizeof(rx_hdr));
2635 memset(rx_buf, 0xaa, sizeof(rx_buf));
2636
2637 rc = tipc_send2(h, tx_hdr, sizeof(tx_hdr), tx_buf, sizeof(tx_buf) / 2);
2638 EXPECT_EQ(sizeof(tx_hdr) + sizeof(tx_buf) / 2, (size_t)rc, "tipc_send_two");
2639
2640 rc = wait(h, &evt, INFINITE_TIME);
2641 EXPECT_EQ(0, rc, "wait for reply");
2642
2643 rc = tipc_recv_hdr_payload(h, rx_hdr, sizeof(rx_hdr), rx_buf,
2644 sizeof(rx_buf));
2645 EXPECT_EQ(sizeof(tx_hdr) + sizeof(tx_buf) / 2, (size_t)rc,
2646 "tipc_recv_hdr_payload");
2647
2648 rc = memcmp(tx_hdr, rx_hdr, sizeof(tx_hdr));
2649 EXPECT_EQ(0, rc, "memcmp");
2650
2651 rc = memcmp(tx_buf, rx_buf, sizeof(tx_buf) / 2);
2652 EXPECT_EQ(0, rc, "memcmp");
2653
2654 /* send/receive message with zero length header */
2655 memset(tx_hdr, 0xaa, sizeof(tx_hdr));
2656 memset(tx_buf, 0x55, sizeof(tx_buf));
2657 memset(rx_hdr, 0xaa, sizeof(rx_hdr));
2658 memset(rx_buf, 0xaa, sizeof(rx_buf));
2659
2660 rc = tipc_send2(h, NULL, 0, tx_buf, sizeof(tx_buf) / 2);
2661 EXPECT_EQ(sizeof(tx_buf) / 2, (size_t)rc, "tipc_send_two");
2662
2663 rc = wait(h, &evt, INFINITE_TIME);
2664 EXPECT_EQ(0, rc, "wait for reply");
2665
2666 rc = tipc_recv_hdr_payload(h, NULL, 0, rx_buf, sizeof(rx_buf));
2667 EXPECT_EQ(sizeof(tx_buf) / 2, (size_t)rc, "tipc_recv_hdr_payload");
2668
2669 rc = memcmp(tx_buf, rx_buf, sizeof(tx_buf) / 2);
2670 EXPECT_EQ(0, rc, "memcmp");
2671
2672 /* send/receive message with zero length payload */
2673 memset(tx_hdr, 0xaa, sizeof(tx_hdr));
2674 memset(tx_buf, 0x55, sizeof(tx_buf));
2675 memset(rx_hdr, 0xaa, sizeof(rx_hdr));
2676 memset(rx_buf, 0xaa, sizeof(rx_buf));
2677
2678 rc = tipc_send2(h, tx_hdr, sizeof(tx_hdr), NULL, 0);
2679 EXPECT_EQ(sizeof(tx_hdr), (size_t)rc, "tipc_send_two");
2680
2681 rc = wait(h, &evt, INFINITE_TIME);
2682 EXPECT_EQ(0, rc, "wait for reply");
2683
2684 rc = tipc_recv_hdr_payload(h, rx_hdr, sizeof(rx_hdr), rx_buf,
2685 sizeof(rx_buf));
2686 EXPECT_EQ(sizeof(tx_hdr), (size_t)rc, "tipc_recv_hdr_payload");
2687
2688 rc = memcmp(tx_hdr, rx_hdr, sizeof(tx_hdr));
2689 EXPECT_EQ(0, rc, "memcmp");
2690
2691 /*
2692 * send/receive header into a larger buffer: should fail as more data is expected
2693 */
2694 memset(tx_hdr, 0xaa, sizeof(tx_hdr));
2695 memset(tx_buf, 0x55, sizeof(tx_buf));
2696 memset(rx_hdr, 0xaa, sizeof(rx_hdr));
2697 memset(rx_buf, 0xaa, sizeof(rx_buf));
2698
2699 rc = tipc_send2(h, tx_hdr, sizeof(tx_hdr) / 2, NULL, 0);
2700 EXPECT_EQ(sizeof(tx_hdr) / 2, (size_t)rc, "tipc_send_two");
2701
2702 rc = wait(h, &evt, INFINITE_TIME);
2703 EXPECT_EQ(0, rc, "wait for reply");
2704
2705 rc = tipc_recv_hdr_payload(h, rx_hdr, sizeof(rx_hdr), NULL, 0);
2706 EXPECT_EQ(ERR_BAD_LEN, rc, "tipc_recv_hdr_payload");
2707
2708 /*
2709 * send/receive message into a short buffer: should fail as a bigger
2710 * buffer is expected
2711 */
2712 memset(tx_hdr, 0xaa, sizeof(tx_hdr));
2713 memset(tx_buf, 0x55, sizeof(tx_buf));
2714 memset(rx_hdr, 0xaa, sizeof(rx_hdr));
2715 memset(rx_buf, 0xaa, sizeof(rx_buf));
2716
2717 rc = tipc_send2(h, tx_hdr, sizeof(tx_hdr), tx_buf, sizeof(tx_buf));
2718 EXPECT_EQ(sizeof(tx_hdr) + sizeof(tx_buf), (size_t)rc, "tipc_send_two");
2719
2720 rc = wait(h, &evt, INFINITE_TIME);
2721 EXPECT_EQ(0, rc, "wait for reply");
2722
2723 rc = tipc_recv_hdr_payload(h, rx_hdr, sizeof(rx_hdr), rx_buf,
2724 sizeof(rx_buf) / 2);
2725 EXPECT_EQ(ERR_BAD_LEN, rc, "tipc_recv_hdr_payload");
2726
2727 rc = close(h);
2728 EXPECT_EQ(0, rc, "close tipc_connect");
2729 }
2730
2731 /****************************************************************************/
2732
2733 TEST(ipc, dup_is_different) {
2734 handle_t hset1;
2735 handle_t hset1_dup;
2736
2737 hset1 = handle_set_create();
2738 EXPECT_GE_ZERO((int)hset1, "create handle set1");
2739
2740 hset1_dup = dup(hset1);
2741 EXPECT_GE_ZERO((int)hset1_dup, "duplicate handle set1");
2742
2743 ABORT_IF_NOT_OK(abort_test);
2744
2745 EXPECT_NE(hset1, hset1_dup, "duplicate is different");
2746
2747 abort_test:
2748 close(hset1);
2749 close(hset1_dup);
2750 }
2751
2752 /****************************************************************************/
2753
2754 static void kernel_wait_any_bug_workaround(void) {
2755 int ret;
2756 uevent_t event;
2757
2758 /* HACK: clear stuck event on handleset */
2759 ret = wait_any(&event, 0);
2760 if (ret == 0) {
2761 TLOGI("retry ret %d, event handle %d, event 0x%x\n", ret, event.handle,
2762 event.event);
2763 ret = wait(event.handle, &event, 0);
2764 TLOGI("nested ret %d, event handle %d, event 0x%x\n", ret, event.handle,
2765 event.event);
2766 }
2767 }
2768
2769 static bool run_test(struct unittest* test) {
2770 handle_base = (handle_t)USER_BASE_HANDLE;
2771
2772 /*
2773 * HACK: We know a connection was made after _port_handle was created to
2774 * trigger the test, so we need to add two to _port_handle to get the first
2775 * free handle index.
2776 */
2777 first_free_handle_index = test->_port_handle + 2 - handle_base;
2778 TLOGI("first_free_handle_index: %d\n", first_free_handle_index);
2779
2780 kernel_wait_any_bug_workaround();
2781
2782 return RUN_ALL_TESTS();
2783 }
2784
2785 /*
2786 * Application entry point
2787 */
2788 int main(void) {
2789 struct unittest ipc_unittest = {
2790 .port_name = SRV_PATH_BASE ".ctrl",
2791 .run_test = run_test,
2792 };
2793 struct unittest* unittest = &ipc_unittest;
2794
2795 TLOGD("Welcome to IPC unittest!!!\n");
2796
2797 return unittest_main(&unittest, 1);
2798 }
2799