1 /*
2  * Copyright (C) 2016 The Android Open Source Project
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  *      http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
17 #include <android-base/logging.h>
18 #include <asm-generic/mman.h>
19 #include <fmq/AidlMessageQueue.h>
20 #include <fmq/AidlMessageQueueCpp.h>
21 #include <fmq/ConvertMQDescriptors.h>
22 #include <fmq/EventFlag.h>
23 #include <fmq/MessageQueue.h>
24 #include <gtest/gtest-death-test.h>
25 #include <gtest/gtest.h>
26 #include <sys/resource.h>
27 #include <atomic>
28 #include <cstdlib>
29 #include <filesystem>
30 #include <sstream>
31 #include <thread>
32 
33 using aidl::android::hardware::common::fmq::SynchronizedReadWrite;
34 using aidl::android::hardware::common::fmq::UnsynchronizedWrite;
35 using cppSynchronizedReadWrite = android::hardware::common::fmq::SynchronizedReadWrite;
36 using cppUnSynchronizedWrite = android::hardware::common::fmq::UnsynchronizedWrite;
37 
38 using android::hardware::kSynchronizedReadWrite;
39 using android::hardware::kUnsynchronizedWrite;
40 
41 enum EventFlagBits : uint32_t {
42     kFmqNotFull = 1 << 0,
43     kFmqNotEmpty = 1 << 1,
44 };
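// A minimal usage sketch (not one of the fixtures below; queue/efGroup are
// illustrative names): a writer signals kFmqNotEmpty after a successful write
// and a reader signals kFmqNotFull after a successful read, e.g.
//
//     if (queue->write(data, len)) {
//         efGroup->wake(kFmqNotEmpty);  // unblock readers waiting for data
//     }
//     if (queue->read(data, len)) {
//         efGroup->wake(kFmqNotFull);   // unblock writers waiting for space
//     }
//
// The blocking tests in this file follow exactly this pattern through
// android::hardware::EventFlag::wait()/wake().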
45 
46 typedef android::AidlMessageQueue<uint8_t, SynchronizedReadWrite> AidlMessageQueueSync;
47 typedef android::AidlMessageQueue<uint8_t, UnsynchronizedWrite> AidlMessageQueueUnsync;
48 typedef android::AidlMessageQueueCpp<uint8_t, cppSynchronizedReadWrite> cppAidlMessageQueueSync;
49 typedef android::AidlMessageQueueCpp<uint8_t, cppUnSynchronizedWrite> cppAidlMessageQueueUnsync;
50 typedef android::hardware::MessageQueue<uint8_t, kSynchronizedReadWrite> MessageQueueSync;
51 typedef android::hardware::MessageQueue<uint8_t, kUnsynchronizedWrite> MessageQueueUnsync;
52 
53 typedef android::AidlMessageQueue<uint16_t, SynchronizedReadWrite> AidlMessageQueueSync16;
54 typedef android::AidlMessageQueueCpp<uint16_t, cppSynchronizedReadWrite> cppAidlMessageQueueSync16;
55 typedef android::hardware::MessageQueue<uint16_t, kSynchronizedReadWrite> MessageQueueSync16;
56 typedef android::AidlMessageQueue<uint16_t, UnsynchronizedWrite> AidlMessageQueueUnsync16;
57 typedef android::AidlMessageQueueCpp<uint16_t, cppUnSynchronizedWrite> cppAidlMessageQueueUnsync16;
58 typedef android::hardware::MessageQueue<uint16_t, kUnsynchronizedWrite> MessageQueueUnsync16;
59 
60 typedef android::hardware::MessageQueue<uint8_t, kSynchronizedReadWrite> MessageQueueSync8;
61 typedef android::hardware::MQDescriptor<uint8_t, kSynchronizedReadWrite> HidlMQDescSync8;
62 typedef android::AidlMessageQueue<int8_t, SynchronizedReadWrite> AidlMessageQueueSync8;
63 typedef aidl::android::hardware::common::fmq::MQDescriptor<int8_t, SynchronizedReadWrite>
64         AidlMQDescSync8;
65 typedef android::AidlMessageQueueCpp<int8_t, cppSynchronizedReadWrite> cppAidlMessageQueueSync8;
66 typedef android::hardware::common::fmq::MQDescriptor<int8_t, cppSynchronizedReadWrite>
67         cppAidlMQDescSync8;
68 
69 typedef android::hardware::MessageQueue<uint8_t, kUnsynchronizedWrite> MessageQueueUnsync8;
70 typedef android::hardware::MQDescriptor<uint8_t, kUnsynchronizedWrite> HidlMQDescUnsync8;
71 typedef android::AidlMessageQueue<int8_t, UnsynchronizedWrite> AidlMessageQueueUnsync8;
72 typedef aidl::android::hardware::common::fmq::MQDescriptor<int8_t, UnsynchronizedWrite>
73         AidlMQDescUnsync8;
74 typedef android::AidlMessageQueueCpp<int8_t, cppUnSynchronizedWrite> cppAidlMessageQueueUnsync8;
75 typedef android::hardware::common::fmq::MQDescriptor<int8_t, cppUnSynchronizedWrite>
76         cppAidlMQDescUnsync8;
77 
78 enum class SetupType {
79     SINGLE_FD,
80     DOUBLE_FD,
81 };
82 
83 template <typename T, SetupType setupType>
84 class TestParamTypes {
85   public:
86     typedef T MQType;
87     static constexpr SetupType Setup = setupType;
88 };
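// Each typed test below receives one of these instantiations as TypeParam, so a
// fixture can name the queue class and the setup flavor generically. A sketch of
// the pattern used throughout this file:
//
//     typename TypeParam::MQType::MemTransaction tx;  // flavor-specific transaction type
//     if (TypeParam::Setup == SetupType::DOUBLE_FD) {
//         // the ring buffer is backed by a separately created ashmem fd
//     }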
89 
90 // Run everything on both the AIDL and HIDL versions with one and two FDs
91 typedef ::testing::Types<TestParamTypes<AidlMessageQueueSync, SetupType::SINGLE_FD>,
92                          TestParamTypes<cppAidlMessageQueueSync, SetupType::SINGLE_FD>,
93                          TestParamTypes<MessageQueueSync, SetupType::SINGLE_FD>,
94                          TestParamTypes<AidlMessageQueueSync, SetupType::DOUBLE_FD>,
95                          TestParamTypes<cppAidlMessageQueueSync, SetupType::DOUBLE_FD>,
96                          TestParamTypes<MessageQueueSync, SetupType::DOUBLE_FD>>
97         SyncTypes;
98 typedef ::testing::Types<TestParamTypes<AidlMessageQueueUnsync, SetupType::SINGLE_FD>,
99                          TestParamTypes<cppAidlMessageQueueUnsync, SetupType::SINGLE_FD>,
100                          TestParamTypes<MessageQueueUnsync, SetupType::SINGLE_FD>,
101                          TestParamTypes<AidlMessageQueueUnsync, SetupType::DOUBLE_FD>,
102                          TestParamTypes<cppAidlMessageQueueUnsync, SetupType::DOUBLE_FD>,
103                          TestParamTypes<MessageQueueUnsync, SetupType::DOUBLE_FD>>
104         UnsyncTypes;
105 typedef ::testing::Types<TestParamTypes<AidlMessageQueueUnsync16, SetupType::SINGLE_FD>,
106                          TestParamTypes<cppAidlMessageQueueUnsync16, SetupType::SINGLE_FD>,
107                          TestParamTypes<MessageQueueUnsync16, SetupType::SINGLE_FD>,
108                          TestParamTypes<AidlMessageQueueUnsync16, SetupType::DOUBLE_FD>,
109                          TestParamTypes<cppAidlMessageQueueUnsync16, SetupType::DOUBLE_FD>,
110                          TestParamTypes<MessageQueueUnsync16, SetupType::DOUBLE_FD>>
111         TwoByteUnsyncTypes;
112 typedef ::testing::Types<TestParamTypes<AidlMessageQueueSync16, SetupType::SINGLE_FD>,
113                          TestParamTypes<cppAidlMessageQueueSync16, SetupType::SINGLE_FD>,
114                          TestParamTypes<MessageQueueSync16, SetupType::SINGLE_FD>,
115                          TestParamTypes<AidlMessageQueueSync16, SetupType::DOUBLE_FD>,
116                          TestParamTypes<cppAidlMessageQueueSync16, SetupType::DOUBLE_FD>,
117                          TestParamTypes<MessageQueueSync16, SetupType::DOUBLE_FD>>
118         BadConfigTypes;
119 
120 template <typename T>
121 class TestBase : public ::testing::Test {
122   public:
123     static void ReaderThreadBlocking(typename T::MQType* fmq, std::atomic<uint32_t>* fwAddr);
124     static void ReaderThreadBlocking2(typename T::MQType* fmq, std::atomic<uint32_t>* fwAddr);
125 };
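// The two reader helpers declared above are defined after the fixtures below and
// are spawned with std::thread by the BlockingReadWrites tests: the first blocks
// directly on EventFlag::wait(), the second blocks inside readBlocking().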
126 
127 TYPED_TEST_CASE(SynchronizedReadWrites, SyncTypes);
128 
129 template <typename T>
130 class SynchronizedReadWrites : public TestBase<T> {
131   protected:
132     virtual void TearDown() {
133         delete mQueue;
134     }
135 
136     virtual void SetUp() {
137         static constexpr size_t kNumElementsInQueue = 2048;
138         static constexpr size_t kPayloadSizeBytes = 1;
139         if (T::Setup == SetupType::SINGLE_FD) {
140             mQueue = new (std::nothrow) typename T::MQType(kNumElementsInQueue);
141         } else {
142             android::base::unique_fd ringbufferFd(::ashmem_create_region(
143                     "SyncReadWrite", kNumElementsInQueue * kPayloadSizeBytes));
144             mQueue = new (std::nothrow)
145                     typename T::MQType(kNumElementsInQueue, false, std::move(ringbufferFd),
146                                        kNumElementsInQueue * kPayloadSizeBytes);
147         }
148         ASSERT_NE(nullptr, mQueue);
149         ASSERT_TRUE(mQueue->isValid());
150         mNumMessagesMax = mQueue->getQuantumCount();
151         ASSERT_EQ(kNumElementsInQueue, mNumMessagesMax);
152     }
153 
154     typename T::MQType* mQueue = nullptr;
155     size_t mNumMessagesMax = 0;
156 };
157 
158 TYPED_TEST_CASE(UnsynchronizedReadWriteTest, UnsyncTypes);
159 
160 template <typename T>
161 class UnsynchronizedReadWriteTest : public TestBase<T> {
162   protected:
163     virtual void TearDown() {
164         delete mQueue;
165     }
166 
167     virtual void SetUp() {
168         static constexpr size_t kNumElementsInQueue = 2048;
169         static constexpr size_t kPayloadSizeBytes = 1;
170         if (T::Setup == SetupType::SINGLE_FD) {
171             mQueue = new (std::nothrow) typename T::MQType(kNumElementsInQueue);
172         } else {
173             android::base::unique_fd ringbufferFd(
174                     ::ashmem_create_region("UnsyncWrite", kNumElementsInQueue * kPayloadSizeBytes));
175             mQueue = new (std::nothrow)
176                     typename T::MQType(kNumElementsInQueue, false, std::move(ringbufferFd),
177                                        kNumElementsInQueue * kPayloadSizeBytes);
178         }
179         ASSERT_NE(nullptr, mQueue);
180         ASSERT_TRUE(mQueue->isValid());
181         mNumMessagesMax = mQueue->getQuantumCount();
182         ASSERT_EQ(kNumElementsInQueue, mNumMessagesMax);
183     }
184 
185     typename T::MQType* mQueue = nullptr;
186     size_t mNumMessagesMax = 0;
187 };
188 
189 TYPED_TEST_CASE(BlockingReadWrites, SyncTypes);
190 
191 template <typename T>
192 class BlockingReadWrites : public TestBase<T> {
193   protected:
194     virtual void TearDown() {
195         delete mQueue;
196     }
197     virtual void SetUp() {
198         static constexpr size_t kNumElementsInQueue = 2048;
199         static constexpr size_t kPayloadSizeBytes = 1;
200         if (T::Setup == SetupType::SINGLE_FD) {
201             mQueue = new (std::nothrow) typename T::MQType(kNumElementsInQueue);
202         } else {
203             android::base::unique_fd ringbufferFd(::ashmem_create_region(
204                     "SyncBlockingReadWrite", kNumElementsInQueue * kPayloadSizeBytes));
205             mQueue = new (std::nothrow)
206                     typename T::MQType(kNumElementsInQueue, false, std::move(ringbufferFd),
207                                        kNumElementsInQueue * kPayloadSizeBytes);
208         }
209         ASSERT_NE(nullptr, mQueue);
210         ASSERT_TRUE(mQueue->isValid());
211         mNumMessagesMax = mQueue->getQuantumCount();
212         ASSERT_EQ(kNumElementsInQueue, mNumMessagesMax);
213         /*
214          * Initialize the EventFlag word to indicate Queue is not full.
215          */
216         std::atomic_init(&mFw, static_cast<uint32_t>(kFmqNotFull));
217     }
218 
219     typename T::MQType* mQueue;
220     std::atomic<uint32_t> mFw;
221     size_t mNumMessagesMax = 0;
222 };
223 
224 TYPED_TEST_CASE(QueueSizeOdd, SyncTypes);
225 
226 template <typename T>
227 class QueueSizeOdd : public TestBase<T> {
228   protected:
229     virtual void TearDown() { delete mQueue; }
230     virtual void SetUp() {
231         static constexpr size_t kNumElementsInQueue = 2049;
232         static constexpr size_t kPayloadSizeBytes = 1;
233         if (T::Setup == SetupType::SINGLE_FD) {
234             mQueue = new (std::nothrow)
235                     typename T::MQType(kNumElementsInQueue, true /* configureEventFlagWord */);
236         } else {
237             android::base::unique_fd ringbufferFd(
238                     ::ashmem_create_region("SyncSizeOdd", kNumElementsInQueue * kPayloadSizeBytes));
239             mQueue = new (std::nothrow) typename T::MQType(
240                     kNumElementsInQueue, true /* configureEventFlagWord */, std::move(ringbufferFd),
241                     kNumElementsInQueue * kPayloadSizeBytes);
242         }
243         ASSERT_NE(nullptr, mQueue);
244         ASSERT_TRUE(mQueue->isValid());
245         mNumMessagesMax = mQueue->getQuantumCount();
246         ASSERT_EQ(kNumElementsInQueue, mNumMessagesMax);
247         auto evFlagWordPtr = mQueue->getEventFlagWord();
248         ASSERT_NE(nullptr, evFlagWordPtr);
249         /*
250          * Initialize the EventFlag word to indicate Queue is not full.
251          */
252         std::atomic_init(evFlagWordPtr, static_cast<uint32_t>(kFmqNotFull));
253     }
254 
255     typename T::MQType* mQueue;
256     size_t mNumMessagesMax = 0;
257 };
258 
259 TYPED_TEST_CASE(BadQueueConfig, BadConfigTypes);
260 
261 TYPED_TEST_CASE(UnsynchronizedOverflowHistoryTest, TwoByteUnsyncTypes);
262 
263 template <typename T>
264 class UnsynchronizedOverflowHistoryTest : public TestBase<T> {
265   protected:
266     virtual void TearDown() { delete mQueue; }
267 
268     virtual void SetUp() {
269         static constexpr size_t kNumElementsInQueue = 2048;
270         static constexpr size_t kPayloadSizeBytes = 2;
271         if (T::Setup == SetupType::SINGLE_FD) {
272             mQueue = new (std::nothrow) typename T::MQType(kNumElementsInQueue);
273         } else {
274             android::base::unique_fd ringbufferFd(::ashmem_create_region(
275                     "UnsyncHistory", kNumElementsInQueue * kPayloadSizeBytes));
276             mQueue = new (std::nothrow)
277                     typename T::MQType(kNumElementsInQueue, false, std::move(ringbufferFd),
278                                        kNumElementsInQueue * kPayloadSizeBytes);
279         }
280         ASSERT_NE(nullptr, mQueue);
281         ASSERT_TRUE(mQueue->isValid());
282         mNumMessagesMax = mQueue->getQuantumCount();
283         ASSERT_EQ(kNumElementsInQueue, mNumMessagesMax);
284     }
285 
286     typename T::MQType* mQueue = nullptr;
287     size_t mNumMessagesMax = 0;
288 };
289 
290 TYPED_TEST_CASE(UnsynchronizedOverflowHistoryTestSingleElement, TwoByteUnsyncTypes);
291 
292 template <typename T>
293 class UnsynchronizedOverflowHistoryTestSingleElement : public TestBase<T> {
294   protected:
295     virtual void TearDown() { delete mQueue; }
296 
297     virtual void SetUp() {
298         static constexpr size_t kNumElementsInQueue = 1;
299         static constexpr size_t kPayloadSizeBytes = 2;
300         if (T::Setup == SetupType::SINGLE_FD) {
301             mQueue = new (std::nothrow) typename T::MQType(kNumElementsInQueue);
302         } else {
303             android::base::unique_fd ringbufferFd(::ashmem_create_region(
304                     "UnsyncHistory", kNumElementsInQueue * kPayloadSizeBytes));
305             mQueue = new (std::nothrow)
306                     typename T::MQType(kNumElementsInQueue, false, std::move(ringbufferFd),
307                                        kNumElementsInQueue * kPayloadSizeBytes);
308         }
309         ASSERT_NE(nullptr, mQueue);
310         ASSERT_TRUE(mQueue->isValid());
311         mNumMessagesMax = mQueue->getQuantumCount();
312         ASSERT_EQ(kNumElementsInQueue, mNumMessagesMax);
313     }
314 
315     typename T::MQType* mQueue = nullptr;
316     size_t mNumMessagesMax = 0;
317 };
318 
319 template <typename T>
320 class BadQueueConfig : public TestBase<T> {};
321 
322 class AidlOnlyBadQueueConfig : public ::testing::Test {};
323 class HidlOnlyBadQueueConfig : public ::testing::Test {};
324 class Hidl2AidlOperation : public ::testing::Test {};
325 class DoubleFdFailures : public ::testing::Test {};
326 
327 /*
328  * Utility function to initialize data to be written to the FMQ
329  */
330 template <typename T>
331 inline void initData(T* data, size_t count) {
332     for (size_t i = 0; i < count; i++) {
333         data[i] = i & 0xFF;
334     }
335 }
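// Typical use in the tests below (sketch only; queue stands for any of the
// flavors above): fill a buffer with a known pattern, write it to the queue,
// then memcmp() it against what was read back:
//
//     uint8_t data[16];
//     initData(data, sizeof(data));
//     ASSERT_TRUE(queue->write(data, sizeof(data)));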
336 
337 /*
338  * This thread will attempt to read and block. When wait returns
339  * it checks if the kFmqNotEmpty bit is actually set.
340  * If the read is successful, it signals a wake on kFmqNotFull.
341  */
342 template <typename T>
343 void TestBase<T>::ReaderThreadBlocking(typename T::MQType* fmq, std::atomic<uint32_t>* fwAddr) {
344     const size_t dataLen = 64;
345     uint8_t data[dataLen];
346     android::hardware::EventFlag* efGroup = nullptr;
347     android::status_t status = android::hardware::EventFlag::createEventFlag(fwAddr, &efGroup);
348     ASSERT_EQ(android::NO_ERROR, status);
349     ASSERT_NE(nullptr, efGroup);
350 
351     while (true) {
352         uint32_t efState = 0;
353         android::status_t ret = efGroup->wait(kFmqNotEmpty,
354                                               &efState,
355                                               5000000000 /* timeoutNanoSeconds */);
356         /*
357          * Wait should not time out here after 5s
358          */
359         ASSERT_NE(android::TIMED_OUT, ret);
360 
361         if ((efState & kFmqNotEmpty) && fmq->read(data, dataLen)) {
362             efGroup->wake(kFmqNotFull);
363             break;
364         }
365     }
366 
367     status = android::hardware::EventFlag::deleteEventFlag(&efGroup);
368     ASSERT_EQ(android::NO_ERROR, status);
369 }
370 
371 /*
372  * This thread will attempt to read and block using the readBlocking() API,
373  * passing in a pointer to an EventFlag object.
374  */
375 template <typename T>
376 void TestBase<T>::ReaderThreadBlocking2(typename T::MQType* fmq, std::atomic<uint32_t>* fwAddr) {
377     const size_t dataLen = 64;
378     uint8_t data[dataLen];
379     android::hardware::EventFlag* efGroup = nullptr;
380     android::status_t status = android::hardware::EventFlag::createEventFlag(fwAddr, &efGroup);
381     ASSERT_EQ(android::NO_ERROR, status);
382     ASSERT_NE(nullptr, efGroup);
383     bool ret = fmq->readBlocking(data,
384                                  dataLen,
385                                  static_cast<uint32_t>(kFmqNotFull),
386                                  static_cast<uint32_t>(kFmqNotEmpty),
387                                  5000000000 /* timeOutNanos */,
388                                  efGroup);
389     ASSERT_TRUE(ret);
390     status = android::hardware::EventFlag::deleteEventFlag(&efGroup);
391     ASSERT_EQ(android::NO_ERROR, status);
392 }
393 
394 TYPED_TEST(BadQueueConfig, QueueSizeTooLarge) {
395     size_t numElementsInQueue = SIZE_MAX / sizeof(uint16_t) + 1;
396     typename TypeParam::MQType fmq(numElementsInQueue);
397     /*
398      * Should fail due to size being too large to fit into size_t.
399      */
400     ASSERT_FALSE(fmq.isValid());
401 }
402 
403 // {flags, fdIndex, offset, extent}
404 static const std::vector<android::hardware::GrantorDescriptor> kGrantors = {
405         {0, 0, 0, 4096},
406         {0, 0, 0, 4096},
407         {0, 0, 0, 4096},
408 };
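// Three grantors are listed because an FMQ descriptor normally carries regions
// for the read pointer, the write pointer, and the data ring buffer (an
// assumption about the MQDescriptor layout, not spelled out in this file); the
// tests below only ever corrupt grantors[0].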
409 
410 // Make sure the queue setup passes when the grantor index/extent are valid,
411 // as a baseline for the next two test cases, which corrupt them.
412 TEST_F(HidlOnlyBadQueueConfig, SanityCheck) {
413     std::vector<android::hardware::GrantorDescriptor> grantors = kGrantors;
414 
415     native_handle_t* handle = native_handle_create(1, 0);
416     int ashmemFd = ashmem_create_region("QueueHidlOnlyBad", 4096);
417     ashmem_set_prot_region(ashmemFd, PROT_READ | PROT_WRITE);
418     handle->data[0] = ashmemFd;
419 
420     android::hardware::MQDescriptor<uint16_t, kSynchronizedReadWrite> desc(grantors, handle,
421                                                                            sizeof(uint16_t));
422     android::hardware::MessageQueue<uint16_t, kSynchronizedReadWrite> fmq(desc);
423     EXPECT_TRUE(fmq.isValid());
424 
425     close(ashmemFd);
426 }
427 
428 TEST_F(HidlOnlyBadQueueConfig, BadFdIndex) {
429     std::vector<android::hardware::GrantorDescriptor> grantors = kGrantors;
430     grantors[0].fdIndex = 5;
431 
432     native_handle_t* handle = native_handle_create(1, 0);
433     int ashmemFd = ashmem_create_region("QueueHidlOnlyBad", 4096);
434     ashmem_set_prot_region(ashmemFd, PROT_READ | PROT_WRITE);
435     handle->data[0] = ashmemFd;
436 
437     android::hardware::MQDescriptor<uint16_t, kSynchronizedReadWrite> desc(grantors, handle,
438                                                                            sizeof(uint16_t));
439     android::hardware::MessageQueue<uint16_t, kSynchronizedReadWrite> fmq(desc);
440     /*
441      * Should fail due to fdIndex being out of range of the native_handle.
442      */
443     EXPECT_FALSE(fmq.isValid());
444 
445     close(ashmemFd);
446 }
447 
448 TEST_F(HidlOnlyBadQueueConfig, ExtentTooLarge) {
449     std::vector<android::hardware::GrantorDescriptor> grantors = kGrantors;
450     grantors[0].extent = 0xfffff041;
451 
452     native_handle_t* handle = native_handle_create(1, 0);
453     int ashmemFd = ashmem_create_region("QueueHidlOnlyBad", 4096);
454     ashmem_set_prot_region(ashmemFd, PROT_READ | PROT_WRITE);
455     handle->data[0] = ashmemFd;
456 
457     android::hardware::MQDescriptor<uint16_t, kSynchronizedReadWrite> desc(grantors, handle,
458                                                                            sizeof(uint16_t));
459     android::hardware::MessageQueue<uint16_t, kSynchronizedReadWrite> fmq(desc);
460     /*
461      * Should fail due to extent being too large.
462      */
463     EXPECT_FALSE(fmq.isValid());
464 
465     close(ashmemFd);
466 }
467 
468 long numFds() {
469     return std::distance(std::filesystem::directory_iterator("/proc/self/fd"),
470                          std::filesystem::directory_iterator{});
471 }
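// numFds() counts the entries in /proc/self/fd, i.e. the file descriptors
// currently open in this process. LookForLeakedFds below compares the count
// before and after creating many invalid queues to catch descriptor leaks.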
472 
473 TEST_F(AidlOnlyBadQueueConfig, LookForLeakedFds) {
474     // Write a log msg first to open the pmsg FD and socket to logd.
475     LOG(INFO) << "Nothin' to see here...";
476     // Create and destroy a large number of queues; if FDs were being leaked,
477     // we could detect it by looking at the number of FDs opened by this
478     // test process.
479     constexpr uint32_t kNumQueues = 100;
480     const size_t kPageSize = getpagesize();
481     size_t numElementsInQueue = SIZE_MAX / sizeof(uint32_t) - kPageSize - 1;
482     long numFdsBefore = numFds();
483     for (int i = 0; i < kNumQueues; i++) {
484         android::AidlMessageQueue<uint32_t, SynchronizedReadWrite> fmq(numElementsInQueue);
485         ASSERT_FALSE(fmq.isValid());
486     }
487     long numFdsAfter = numFds();
488     EXPECT_LT(numFdsAfter, kNumQueues);
489     EXPECT_EQ(numFdsAfter, numFdsBefore);
490 }
491 
492 TEST_F(Hidl2AidlOperation, ConvertDescriptorsSync) {
493     size_t numElementsInQueue = 64;
494 
495     // Create HIDL side and get MQDescriptor
496     MessageQueueSync8 fmq(numElementsInQueue);
497     ASSERT_TRUE(fmq.isValid());
498     const HidlMQDescSync8* hidlDesc = fmq.getDesc();
499     ASSERT_NE(nullptr, hidlDesc);
500 
501     // Create AIDL MQDescriptor to send to another process based off the HIDL MQDescriptor
502     AidlMQDescSync8 aidlDesc;
503     android::unsafeHidlToAidlMQDescriptor<uint8_t, int8_t, SynchronizedReadWrite>(*hidlDesc,
504                                                                                   &aidlDesc);
505 
506     // Other process will create the other side of the queue using the AIDL MQDescriptor
507     AidlMessageQueueSync8 aidlFmq(aidlDesc);
508     ASSERT_TRUE(aidlFmq.isValid());
509 
510     // Make sure a write to the HIDL side will show up for the AIDL side
511     constexpr size_t dataLen = 4;
512     uint8_t data[dataLen] = {12, 11, 10, 9};
513     fmq.write(data, dataLen);
514 
515     int8_t readData[dataLen];
516     ASSERT_TRUE(aidlFmq.read(readData, dataLen));
517 
518     ASSERT_EQ(data[0], readData[0]);
519     ASSERT_EQ(data[1], readData[1]);
520     ASSERT_EQ(data[2], readData[2]);
521     ASSERT_EQ(data[3], readData[3]);
522 }
523 
524 TEST_F(Hidl2AidlOperation, ConvertDescriptorsUnsync) {
525     size_t numElementsInQueue = 64;
526 
527     // Create HIDL side and get MQDescriptor
528     MessageQueueUnsync8 fmq(numElementsInQueue);
529     ASSERT_TRUE(fmq.isValid());
530     const HidlMQDescUnsync8* hidlDesc = fmq.getDesc();
531     ASSERT_NE(nullptr, hidlDesc);
532 
533     // Create AIDL MQDescriptor to send to another process based off the HIDL MQDescriptor
534     AidlMQDescUnsync8 aidlDesc;
535     android::unsafeHidlToAidlMQDescriptor<uint8_t, int8_t, UnsynchronizedWrite>(*hidlDesc,
536                                                                                 &aidlDesc);
537 
538     // Other process will create the other side of the queue using the AIDL MQDescriptor
539     AidlMessageQueueUnsync8 aidlFmq(aidlDesc);
540     ASSERT_TRUE(aidlFmq.isValid());
541 
542     // Can have multiple readers with unsync flavor
543     AidlMessageQueueUnsync8 aidlFmq2(aidlDesc);
544     ASSERT_TRUE(aidlFmq2.isValid());
545 
546     // Make sure a write to the HIDL side will show up for the AIDL side
547     constexpr size_t dataLen = 4;
548     uint8_t data[dataLen] = {12, 11, 10, 9};
549     fmq.write(data, dataLen);
550 
551     int8_t readData[dataLen];
552     ASSERT_TRUE(aidlFmq.read(readData, dataLen));
553     int8_t readData2[dataLen];
554     ASSERT_TRUE(aidlFmq2.read(readData2, dataLen));
555 
556     ASSERT_EQ(data[0], readData[0]);
557     ASSERT_EQ(data[1], readData[1]);
558     ASSERT_EQ(data[2], readData[2]);
559     ASSERT_EQ(data[3], readData[3]);
560     ASSERT_EQ(data[0], readData2[0]);
561     ASSERT_EQ(data[1], readData2[1]);
562     ASSERT_EQ(data[2], readData2[2]);
563     ASSERT_EQ(data[3], readData2[3]);
564 }
565 
566 TEST_F(Hidl2AidlOperation, ConvertFdIndex1) {
567     native_handle_t* mqHandle = native_handle_create(2 /* numFds */, 0 /* numInts */);
568     if (mqHandle == nullptr) {
569         return;
570     }
571     mqHandle->data[0] = 12;
572     mqHandle->data[1] = 5;
573     ::android::hardware::hidl_vec<android::hardware::GrantorDescriptor> grantors;
574     grantors.resize(3);
575     grantors[0] = {0, 1 /* fdIndex */, 16, 16};
576     grantors[1] = {0, 1 /* fdIndex */, 16, 16};
577     grantors[2] = {0, 1 /* fdIndex */, 16, 16};
578 
579     HidlMQDescUnsync8 hidlDesc(grantors, mqHandle, 10);
580     ASSERT_TRUE(hidlDesc.isHandleValid());
581 
582     AidlMQDescUnsync8 aidlDesc;
583     bool ret = android::unsafeHidlToAidlMQDescriptor<uint8_t, int8_t, UnsynchronizedWrite>(
584             hidlDesc, &aidlDesc);
585     ASSERT_TRUE(ret);
586 }
587 
588 TEST_F(Hidl2AidlOperation, ConvertMultipleFds) {
589     native_handle_t* mqHandle = native_handle_create(2 /* numFds */, 0 /* numInts */);
590     if (mqHandle == nullptr) {
591         return;
592     }
593     mqHandle->data[0] = ::ashmem_create_region("ConvertMultipleFds", 8);
594     mqHandle->data[1] = ::ashmem_create_region("ConvertMultipleFds2", 8);
595     ::android::hardware::hidl_vec<android::hardware::GrantorDescriptor> grantors;
596     grantors.resize(3);
597     grantors[0] = {0, 1 /* fdIndex */, 16, 16};
598     grantors[1] = {0, 1 /* fdIndex */, 16, 16};
599     grantors[2] = {0, 0 /* fdIndex */, 16, 16};
600 
601     HidlMQDescUnsync8 hidlDesc(grantors, mqHandle, 10);
602     ASSERT_TRUE(hidlDesc.isHandleValid());
603 
604     AidlMQDescUnsync8 aidlDesc;
605     bool ret = android::unsafeHidlToAidlMQDescriptor<uint8_t, int8_t, UnsynchronizedWrite>(
606             hidlDesc, &aidlDesc);
607     ASSERT_TRUE(ret);
608     EXPECT_EQ(aidlDesc.handle.fds.size(), 2);
609 }
610 
611 // TODO(b/165674950) Since AIDL does not support unsigned integers, it can only support
612 // 1/2 the queue size of HIDL. Once support is added to AIDL, this restriction can be
613 // lifted. Until then, check against SSIZE_MAX instead of SIZE_MAX.
614 TEST_F(AidlOnlyBadQueueConfig, QueueSizeTooLargeForAidl) {
615     size_t numElementsInQueue = SSIZE_MAX / sizeof(uint16_t) + 1;
616     AidlMessageQueueSync16 fmq(numElementsInQueue);
617     /*
618      * Should fail because the size is too large for the signed sizes AIDL supports (see TODO above).
619      */
620     ASSERT_FALSE(fmq.isValid());
621 }
622 
623 TEST_F(AidlOnlyBadQueueConfig, NegativeAidlDescriptor) {
624     aidl::android::hardware::common::fmq::MQDescriptor<uint16_t, SynchronizedReadWrite> desc;
625     desc.quantum = -10;
626     AidlMessageQueueSync16 fmq(desc);
627     /*
628      * Should fail due to quantum being negative.
629      */
630     ASSERT_FALSE(fmq.isValid());
631 }
632 
633 TEST_F(AidlOnlyBadQueueConfig, NegativeAidlDescriptorGrantor) {
634     aidl::android::hardware::common::fmq::MQDescriptor<uint16_t, SynchronizedReadWrite> desc;
635     desc.quantum = 2;
636     desc.flags = 0;
637     desc.grantors.push_back(
638             aidl::android::hardware::common::fmq::GrantorDescriptor{.offset = 12, .extent = -10});
639     AidlMessageQueueSync16 fmq(desc);
640     /*
641      * Should fail due to grantor having negative extent.
642      */
643     ASSERT_FALSE(fmq.isValid());
644 }
645 
646 /*
647  * Test creating a new queue from a modified MQDescriptor of another queue.
648  * If MQDescriptor.quantum doesn't match the size of the payload(T), the queue
649  * should be invalid.
650  */
651 TEST_F(AidlOnlyBadQueueConfig, MismatchedPayloadSize) {
652     AidlMessageQueueSync16 fmq = AidlMessageQueueSync16(64);
653     aidl::android::hardware::common::fmq::MQDescriptor<uint16_t, SynchronizedReadWrite> desc =
654             fmq.dupeDesc();
655     // This should work fine with the unmodified MQDescriptor
656     AidlMessageQueueSync16 fmq2 = AidlMessageQueueSync16(desc);
657     ASSERT_TRUE(fmq2.isValid());
658 
659     // Simulate a difference in payload size between processes handling the queue
660     desc.quantum = 8;
661     AidlMessageQueueSync16 fmq3 = AidlMessageQueueSync16(desc);
662 
663     // Should fail due to the quantum not matching the sizeof(uint16_t)
664     ASSERT_FALSE(fmq3.isValid());
665 }
666 
667 /*
668  * Test creating a new queue with an invalid fd. This should assert with message
669  * "mRing is null".
670  */
671 TEST_F(DoubleFdFailures, InvalidFd) {
672     android::base::SetLogger(android::base::StdioLogger);
673     auto queue = AidlMessageQueueSync(64, false, android::base::unique_fd(3000), 64);
674     EXPECT_FALSE(queue.isValid());
675 }
676 
677 /*
678  * Test creating a new queue with a buffer fd and bufferSize smaller than the
679  * requested queue. This should fail to create a valid message queue.
680  */
681 TEST_F(DoubleFdFailures, InvalidFdSize) {
682     constexpr size_t kNumElementsInQueue = 1024;
683     constexpr size_t kRequiredDataBufferSize = kNumElementsInQueue * sizeof(uint16_t);
684     android::base::unique_fd ringbufferFd(
685             ::ashmem_create_region("SyncReadWrite", kRequiredDataBufferSize - 8));
686     AidlMessageQueueSync16 fmq = AidlMessageQueueSync16(
687             kNumElementsInQueue, false, std::move(ringbufferFd), kRequiredDataBufferSize - 8);
688     EXPECT_FALSE(fmq.isValid());
689 }
690 
691 /*
692  * Test creating a new queue with a buffer fd and bufferSize larger than the
693  * requested queue. The message queue should be valid.
694  */
695 TEST_F(DoubleFdFailures, LargerFdSize) {
696     constexpr size_t kNumElementsInQueue = 1024;
697     constexpr size_t kRequiredDataBufferSize = kNumElementsInQueue * sizeof(uint16_t);
698     android::base::unique_fd ringbufferFd(
699             ::ashmem_create_region("SyncReadWrite", kRequiredDataBufferSize + 8));
700     AidlMessageQueueSync16 fmq = AidlMessageQueueSync16(
701             kNumElementsInQueue, false, std::move(ringbufferFd), kRequiredDataBufferSize + 8);
702     EXPECT_TRUE(fmq.isValid());
703 }
704 
705 /*
706  * Test that basic blocking works. This test uses the non-blocking read()/write()
707  * APIs.
708  */
709 TYPED_TEST(BlockingReadWrites, SmallInputTest1) {
710     const size_t dataLen = 64;
711     uint8_t data[dataLen] = {0};
712 
713     android::hardware::EventFlag* efGroup = nullptr;
714     android::status_t status = android::hardware::EventFlag::createEventFlag(&this->mFw, &efGroup);
715 
716     ASSERT_EQ(android::NO_ERROR, status);
717     ASSERT_NE(nullptr, efGroup);
718 
719     /*
720      * Start a thread that will try to read and block on kFmqNotEmpty.
721      */
722     std::thread Reader(BlockingReadWrites<TypeParam>::ReaderThreadBlocking, this->mQueue,
723                        &this->mFw);
724     struct timespec waitTime = {0, 100 * 1000000};
725     ASSERT_EQ(0, nanosleep(&waitTime, NULL));
726 
727     /*
728      * After waiting for some time write into the FMQ
729      * and call Wake on kFmqNotEmpty.
730      */
731     ASSERT_TRUE(this->mQueue->write(data, dataLen));
732     status = efGroup->wake(kFmqNotEmpty);
733     ASSERT_EQ(android::NO_ERROR, status);
734 
735     ASSERT_EQ(0, nanosleep(&waitTime, NULL));
736     Reader.join();
737 
738     status = android::hardware::EventFlag::deleteEventFlag(&efGroup);
739     ASSERT_EQ(android::NO_ERROR, status);
740 }
741 
742 /*
743  * Test that basic blocking works. This test uses the
744  * writeBlocking()/readBlocking() APIs.
745  */
746 TYPED_TEST(BlockingReadWrites, SmallInputTest2) {
747     const size_t dataLen = 64;
748     uint8_t data[dataLen] = {0};
749 
750     android::hardware::EventFlag* efGroup = nullptr;
751     android::status_t status = android::hardware::EventFlag::createEventFlag(&this->mFw, &efGroup);
752 
753     ASSERT_EQ(android::NO_ERROR, status);
754     ASSERT_NE(nullptr, efGroup);
755 
756     /*
757      * Start a thread that will try to read and block on kFmqNotEmpty. It will
758      * call wake() on kFmqNotFull when the read is successful.
759      */
760     std::thread Reader(BlockingReadWrites<TypeParam>::ReaderThreadBlocking2, this->mQueue,
761                        &this->mFw);
762     bool ret = this->mQueue->writeBlocking(data, dataLen, static_cast<uint32_t>(kFmqNotFull),
763                                            static_cast<uint32_t>(kFmqNotEmpty),
764                                            5000000000 /* timeOutNanos */, efGroup);
765     ASSERT_TRUE(ret);
766     Reader.join();
767 
768     status = android::hardware::EventFlag::deleteEventFlag(&efGroup);
769     ASSERT_EQ(android::NO_ERROR, status);
770 }
771 
772 /*
773  * Test that basic blocking times out as intended.
774  */
775 TYPED_TEST(BlockingReadWrites, BlockingTimeOutTest) {
776     android::hardware::EventFlag* efGroup = nullptr;
777     android::status_t status = android::hardware::EventFlag::createEventFlag(&this->mFw, &efGroup);
778 
779     ASSERT_EQ(android::NO_ERROR, status);
780     ASSERT_NE(nullptr, efGroup);
781 
782     /* Block on an EventFlag bit that no one will wake and time out in 1s */
783     uint32_t efState = 0;
784     android::status_t ret = efGroup->wait(kFmqNotEmpty,
785                                           &efState,
786                                           1000000000 /* timeoutNanoSeconds */);
787     /*
788      * Wait should time out in a second.
789      */
790     EXPECT_EQ(android::TIMED_OUT, ret);
791 
792     status = android::hardware::EventFlag::deleteEventFlag(&efGroup);
793     ASSERT_EQ(android::NO_ERROR, status);
794 }
795 
796 /*
797  * Test EventFlag wait on an already-signaled flag with a short timeout.
798  */
799 TYPED_TEST(BlockingReadWrites, ShortEventFlagWaitWithWakeTest) {
800     std::atomic<uint32_t> eventFlagWord;
801     std::atomic_init(&eventFlagWord, static_cast<uint32_t>(kFmqNotFull));
802     android::hardware::EventFlag* efGroup = nullptr;
803     android::status_t status =
804             android::hardware::EventFlag::createEventFlag(&eventFlagWord, &efGroup);
805     ASSERT_EQ(android::NO_ERROR, status);
806     ASSERT_NE(nullptr, efGroup);
807 
808     status = efGroup->wake(kFmqNotEmpty);
809     ASSERT_EQ(android::NO_ERROR, status);
810 
811     uint32_t efState = 0;
812     android::status_t ret = efGroup->wait(kFmqNotEmpty, &efState, 1 /* ns */, true /* retry */);
813     ASSERT_EQ(android::NO_ERROR, ret);
814 
815     status = android::hardware::EventFlag::deleteEventFlag(&efGroup);
816     ASSERT_EQ(android::NO_ERROR, status);
817 }
818 
819 /*
820  * Test EventFlag wait with no wakeup and a short timeout.
821  */
822 TYPED_TEST(BlockingReadWrites, ShortEventFlagWaitWithoutWakeTest) {
823     std::atomic<uint32_t> eventFlagWord;
824     std::atomic_init(&eventFlagWord, static_cast<uint32_t>(kFmqNotFull));
825     android::hardware::EventFlag* efGroup = nullptr;
826     android::status_t status =
827             android::hardware::EventFlag::createEventFlag(&eventFlagWord, &efGroup);
828     ASSERT_EQ(android::NO_ERROR, status);
829     ASSERT_NE(nullptr, efGroup);
830 
831     uint32_t efState = 0;
832     android::status_t ret = efGroup->wait(kFmqNotEmpty, &efState, 1 /* ns */, true /* retry */);
833     ASSERT_EQ(android::TIMED_OUT, ret);
834 
835     status = android::hardware::EventFlag::deleteEventFlag(&efGroup);
836     ASSERT_EQ(android::NO_ERROR, status);
837 }
838 
839 /*
840  * Test FMQ write and read with event flag wait.
841  */
842 TYPED_TEST(BlockingReadWrites, FmqWriteAndReadWithShortEventFlagWaitTest) {
843     android::hardware::EventFlag* efGroup = nullptr;
844     android::status_t status = android::hardware::EventFlag::createEventFlag(&this->mFw, &efGroup);
845     ASSERT_EQ(android::NO_ERROR, status);
846     ASSERT_NE(nullptr, efGroup);
847 
848     /*
849      * Write into the FMQ and call wake on kFmqNotEmpty so that the
850      * short-timeout readBlocking() below can succeed immediately.
851      */
852     const size_t dataLen = 16;
853     uint8_t dataW[dataLen] = {0};
854     uint8_t dataR[dataLen] = {0};
855     ASSERT_TRUE(this->mQueue->write(dataW, dataLen));
856     status = efGroup->wake(kFmqNotEmpty);
857     ASSERT_EQ(android::NO_ERROR, status);
858 
859     ASSERT_TRUE(this->mQueue->readBlocking(dataR, dataLen, static_cast<uint32_t>(kFmqNotEmpty),
860                                            static_cast<uint32_t>(kFmqNotFull), 1 /* timeOutNanos */,
861                                            efGroup));
862     ASSERT_EQ(0, memcmp(dataW, dataR, dataLen));
863 
864     status = android::hardware::EventFlag::deleteEventFlag(&efGroup);
865     ASSERT_EQ(android::NO_ERROR, status);
866 }
867 
868 /*
869  * Test that odd queue sizes do not cause an unaligned access
870  * error on the EventFlag object.
871  */
872 TYPED_TEST(QueueSizeOdd, EventFlagTest) {
873     const size_t dataLen = 64;
874     uint8_t data[dataLen] = {0};
875 
876     bool ret = this->mQueue->writeBlocking(data, dataLen, static_cast<uint32_t>(kFmqNotFull),
877                                            static_cast<uint32_t>(kFmqNotEmpty),
878                                            5000000000 /* timeOutNanos */);
879     ASSERT_TRUE(ret);
880 }
881 
882 /*
883  * Verify that a few bytes of data can be successfully written and read.
884  */
885 TYPED_TEST(SynchronizedReadWrites, SmallInputTest1) {
886     const size_t dataLen = 16;
887     ASSERT_LE(dataLen, this->mNumMessagesMax);
888     uint8_t data[dataLen];
889 
890     initData(data, dataLen);
891 
892     ASSERT_TRUE(this->mQueue->write(data, dataLen));
893     uint8_t readData[dataLen] = {};
894     ASSERT_TRUE(this->mQueue->read(readData, dataLen));
895     ASSERT_EQ(0, memcmp(data, readData, dataLen));
896 }
897 
898 /*
899  * Verify that a few bytes of data can be successfully written and read using
900  * beginRead/beginWrite/CommitRead/CommitWrite
901  */
902 TYPED_TEST(SynchronizedReadWrites, SmallInputTest2) {
903     const size_t dataLen = 16;
904     ASSERT_LE(dataLen, this->mNumMessagesMax);
905     uint8_t data[dataLen];
906 
907     initData(data, dataLen);
908 
909     typename TypeParam::MQType::MemTransaction tx;
910     ASSERT_TRUE(this->mQueue->beginWrite(dataLen, &tx));
911 
912     ASSERT_TRUE(tx.copyTo(data, 0 /* startIdx */, dataLen));
913 
914     ASSERT_TRUE(this->mQueue->commitWrite(dataLen));
915 
916     uint8_t readData[dataLen] = {};
917 
918     ASSERT_TRUE(this->mQueue->beginRead(dataLen, &tx));
919 
920     ASSERT_TRUE(tx.copyFrom(readData, 0 /* startIdx */, dataLen));
921 
922     ASSERT_TRUE(this->mQueue->commitRead(dataLen));
923 
924     ASSERT_EQ(0, memcmp(data, readData, dataLen));
925 }
926 
927 /*
928  * Verify that a few bytes of data can be successfully written and read using
929  * beginRead/beginWrite/CommitRead/CommitWrite as well as getSlot().
930  */
931 TYPED_TEST(SynchronizedReadWrites, SmallInputTest3) {
932     const size_t dataLen = 16;
933     ASSERT_LE(dataLen, this->mNumMessagesMax);
934     uint8_t data[dataLen];
935 
936     initData(data, dataLen);
937     typename TypeParam::MQType::MemTransaction tx;
938     ASSERT_TRUE(this->mQueue->beginWrite(dataLen, &tx));
939 
940     auto first = tx.getFirstRegion();
941     auto second = tx.getSecondRegion();
942 
943     ASSERT_EQ(first.getLength() + second.getLength(),  dataLen);
944     for (size_t i = 0; i < dataLen; i++) {
945         uint8_t* ptr = tx.getSlot(i);
946         *ptr = data[i];
947     }
948 
949     ASSERT_TRUE(this->mQueue->commitWrite(dataLen));
950 
951     uint8_t readData[dataLen] = {};
952 
953     ASSERT_TRUE(this->mQueue->beginRead(dataLen, &tx));
954 
955     first = tx.getFirstRegion();
956     second = tx.getSecondRegion();
957 
958     ASSERT_EQ(first.getLength() + second.getLength(),  dataLen);
959 
960     for (size_t i = 0; i < dataLen; i++) {
961         uint8_t* ptr = tx.getSlot(i);
962         readData[i] = *ptr;
963     }
964 
965     ASSERT_TRUE(this->mQueue->commitRead(dataLen));
966 
967     ASSERT_EQ(0, memcmp(data, readData, dataLen));
968 }
969 
970 /*
971  * Verify that read() returns false when trying to read from an empty queue.
972  */
973 TYPED_TEST(SynchronizedReadWrites, ReadWhenEmpty1) {
974     ASSERT_EQ(0UL, this->mQueue->availableToRead());
975     const size_t dataLen = 2;
976     ASSERT_LE(dataLen, this->mNumMessagesMax);
977     uint8_t readData[dataLen];
978     ASSERT_FALSE(this->mQueue->read(readData, dataLen));
979 }
980 
981 /*
982  * Verify that beginRead() returns a MemTransaction object with null pointers when trying
983  * to read from an empty queue.
984  */
985 TYPED_TEST(SynchronizedReadWrites, ReadWhenEmpty2) {
986     ASSERT_EQ(0UL, this->mQueue->availableToRead());
987     const size_t dataLen = 2;
988     ASSERT_LE(dataLen, this->mNumMessagesMax);
989 
990     typename TypeParam::MQType::MemTransaction tx;
991     ASSERT_FALSE(this->mQueue->beginRead(dataLen, &tx));
992 
993     auto first = tx.getFirstRegion();
994     auto second = tx.getSecondRegion();
995 
996     ASSERT_EQ(nullptr, first.getAddress());
997     ASSERT_EQ(nullptr, second.getAddress());
998 }
999 
1000 /*
1001  * Write the queue until full. Verify that another write is unsuccessful.
1002  * Verify that availableToWrite() returns 0 as expected.
1003  */
1004 TYPED_TEST(SynchronizedReadWrites, WriteWhenFull1) {
1005     ASSERT_EQ(0UL, this->mQueue->availableToRead());
1006     std::vector<uint8_t> data(this->mNumMessagesMax);
1007 
1008     initData(&data[0], this->mNumMessagesMax);
1009     ASSERT_TRUE(this->mQueue->write(&data[0], this->mNumMessagesMax));
1010     ASSERT_EQ(0UL, this->mQueue->availableToWrite());
1011     ASSERT_FALSE(this->mQueue->write(&data[0], 1));
1012 
1013     std::vector<uint8_t> readData(this->mNumMessagesMax);
1014     ASSERT_TRUE(this->mQueue->read(&readData[0], this->mNumMessagesMax));
1015     ASSERT_EQ(data, readData);
1016 }
1017 
1018 /*
1019  * Write the queue until full. Verify that beginWrite() returns
1020  * a MemTransaction object with null base pointers.
1021  */
1022 TYPED_TEST(SynchronizedReadWrites, WriteWhenFull2) {
1023     ASSERT_EQ(0UL, this->mQueue->availableToRead());
1024     std::vector<uint8_t> data(this->mNumMessagesMax);
1025 
1026     initData(&data[0], this->mNumMessagesMax);
1027     ASSERT_TRUE(this->mQueue->write(&data[0], this->mNumMessagesMax));
1028     ASSERT_EQ(0UL, this->mQueue->availableToWrite());
1029 
1030     typename TypeParam::MQType::MemTransaction tx;
1031     ASSERT_FALSE(this->mQueue->beginWrite(1, &tx));
1032 
1033     auto first = tx.getFirstRegion();
1034     auto second = tx.getSecondRegion();
1035 
1036     ASSERT_EQ(nullptr, first.getAddress());
1037     ASSERT_EQ(nullptr, second.getAddress());
1038 }
1039 
1040 /*
1041  * Write a chunk of data equal to the queue size.
1042  * Verify that the write is successful and the subsequent read
1043  * returns the expected data.
1044  */
1045 TYPED_TEST(SynchronizedReadWrites, LargeInputTest1) {
1046     std::vector<uint8_t> data(this->mNumMessagesMax);
1047     initData(&data[0], this->mNumMessagesMax);
1048     ASSERT_TRUE(this->mQueue->write(&data[0], this->mNumMessagesMax));
1049     std::vector<uint8_t> readData(this->mNumMessagesMax);
1050     ASSERT_TRUE(this->mQueue->read(&readData[0], this->mNumMessagesMax));
1051     ASSERT_EQ(data, readData);
1052 }
1053 
1054 /*
1055  * Attempt to write a chunk of data larger than the queue size.
1056  * Verify that it fails. Verify that a subsequent read fails and
1057  * the queue is still empty.
1058  */
1059 TYPED_TEST(SynchronizedReadWrites, LargeInputTest2) {
1060     ASSERT_EQ(0UL, this->mQueue->availableToRead());
1061     const size_t dataLen = 4096;
1062     ASSERT_GT(dataLen, this->mNumMessagesMax);
1063     std::vector<uint8_t> data(dataLen);
1064 
1065     initData(&data[0], dataLen);
1066     ASSERT_FALSE(this->mQueue->write(&data[0], dataLen));
1067     std::vector<uint8_t> readData(this->mNumMessagesMax);
1068     ASSERT_FALSE(this->mQueue->read(&readData[0], this->mNumMessagesMax));
1069     ASSERT_NE(data, readData);
1070     ASSERT_EQ(0UL, this->mQueue->availableToRead());
1071 }
1072 
1073 /*
1074  * After the queue is full, try to write more data. Verify that
1075  * the attempt returns false. Verify that the attempt did not
1076  * affect the pre-existing data in the queue.
1077  */
1078 TYPED_TEST(SynchronizedReadWrites, LargeInputTest3) {
1079     std::vector<uint8_t> data(this->mNumMessagesMax);
1080     initData(&data[0], this->mNumMessagesMax);
1081     ASSERT_TRUE(this->mQueue->write(&data[0], this->mNumMessagesMax));
1082     ASSERT_FALSE(this->mQueue->write(&data[0], 1));
1083     std::vector<uint8_t> readData(this->mNumMessagesMax);
1084     ASSERT_TRUE(this->mQueue->read(&readData[0], this->mNumMessagesMax));
1085     ASSERT_EQ(data, readData);
1086 }
1087 
1088 /*
1089  * Verify that beginWrite() returns a MemTransaction with
1090  * null base pointers when attempting to write data larger
1091  * than the queue size.
1092  */
1093 TYPED_TEST(SynchronizedReadWrites, LargeInputTest4) {
1094     ASSERT_EQ(0UL, this->mQueue->availableToRead());
1095     const size_t dataLen = 4096;
1096     ASSERT_GT(dataLen, this->mNumMessagesMax);
1097 
1098     typename TypeParam::MQType::MemTransaction tx;
1099     ASSERT_FALSE(this->mQueue->beginWrite(dataLen, &tx));
1100 
1101     auto first = tx.getFirstRegion();
1102     auto second = tx.getSecondRegion();
1103 
1104     ASSERT_EQ(nullptr, first.getAddress());
1105     ASSERT_EQ(nullptr, second.getAddress());
1106 }
1107 
1108 /*
1109  * Verify that multiple reads one after the other return expected data.
1110  */
1111 TYPED_TEST(SynchronizedReadWrites, MultipleRead) {
1112     const size_t chunkSize = 100;
1113     const size_t chunkNum = 5;
1114     const size_t dataLen = chunkSize * chunkNum;
1115     ASSERT_LE(dataLen, this->mNumMessagesMax);
1116     uint8_t data[dataLen];
1117 
1118     initData(data, dataLen);
1119     ASSERT_TRUE(this->mQueue->write(data, dataLen));
1120     uint8_t readData[dataLen] = {};
1121     for (size_t i = 0; i < chunkNum; i++) {
1122         ASSERT_TRUE(this->mQueue->read(readData + i * chunkSize, chunkSize));
1123     }
1124     ASSERT_EQ(0, memcmp(readData, data, dataLen));
1125 }
1126 
1127 /*
1128  * Verify that multiple writes one after the other happen correctly.
1129  */
1130 TYPED_TEST(SynchronizedReadWrites, MultipleWrite) {
1131     const int chunkSize = 100;
1132     const int chunkNum = 5;
1133     const size_t dataLen = chunkSize * chunkNum;
1134     ASSERT_LE(dataLen, this->mNumMessagesMax);
1135     uint8_t data[dataLen];
1136 
1137     initData(data, dataLen);
1138     for (unsigned int i = 0; i < chunkNum; i++) {
1139         ASSERT_TRUE(this->mQueue->write(data + i * chunkSize, chunkSize));
1140     }
1141     uint8_t readData[dataLen] = {};
1142     ASSERT_TRUE(this->mQueue->read(readData, dataLen));
1143     ASSERT_EQ(0, memcmp(readData, data, dataLen));
1144 }
1145 
1146 /*
1147  * Write enough messages into the FMQ to almost fill it
1148  * and read back the same.
1149  * Write this->mNumMessagesMax messages into the queue. This will cause a
1150  * wrap around. Read and verify the data.
1151  */
1152 TYPED_TEST(SynchronizedReadWrites, ReadWriteWrapAround1) {
1153     size_t numMessages = this->mNumMessagesMax - 1;
1154     std::vector<uint8_t> data(this->mNumMessagesMax);
1155     std::vector<uint8_t> readData(this->mNumMessagesMax);
1156     initData(&data[0], this->mNumMessagesMax);
1157     ASSERT_TRUE(this->mQueue->write(&data[0], numMessages));
1158     ASSERT_TRUE(this->mQueue->read(&readData[0], numMessages));
1159     ASSERT_TRUE(this->mQueue->write(&data[0], this->mNumMessagesMax));
1160     ASSERT_TRUE(this->mQueue->read(&readData[0], this->mNumMessagesMax));
1161     ASSERT_EQ(data, readData);
1162 }
1163 
1164 /*
1165  * Use beginRead/commitRead/beginWrite/commitWrite APIs
1166  * to test that wrap-arounds are handled correctly.
1167  * Write enough messages into the FMQ to almost fill it
1168  * and read back the same.
1169  * Write mNumMessagesMax messages into the queue. This will cause a
1170  * wrap around. Read and verify the data.
1171  */
1172 TYPED_TEST(SynchronizedReadWrites, ReadWriteWrapAround2) {
1173     size_t dataLen = this->mNumMessagesMax - 1;
1174     std::vector<uint8_t> data(this->mNumMessagesMax);
1175     std::vector<uint8_t> readData(this->mNumMessagesMax);
1176     initData(&data[0], this->mNumMessagesMax);
1177     ASSERT_TRUE(this->mQueue->write(&data[0], dataLen));
1178     ASSERT_TRUE(this->mQueue->read(&readData[0], dataLen));
1179 
1180     /*
1181      * The next write and read will have to deal with wrap-arounds.
1182      */
1183     typename TypeParam::MQType::MemTransaction tx;
1184     ASSERT_TRUE(this->mQueue->beginWrite(this->mNumMessagesMax, &tx));
1185 
1186     auto first = tx.getFirstRegion();
1187     auto second = tx.getSecondRegion();
1188 
1189     ASSERT_EQ(first.getLength() + second.getLength(), this->mNumMessagesMax);
1190 
1191     ASSERT_TRUE(tx.copyTo(&data[0], 0 /* startIdx */, this->mNumMessagesMax));
1192 
1193     ASSERT_TRUE(this->mQueue->commitWrite(this->mNumMessagesMax));
1194 
1195     ASSERT_TRUE(this->mQueue->beginRead(this->mNumMessagesMax, &tx));
1196 
1197     first = tx.getFirstRegion();
1198     second = tx.getSecondRegion();
1199 
1200     ASSERT_EQ(first.getLength() + second.getLength(), this->mNumMessagesMax);
1201 
1202     ASSERT_TRUE(tx.copyFrom(&readData[0], 0 /* startIdx */, this->mNumMessagesMax));
1203     ASSERT_TRUE(this->mQueue->commitRead(this->mNumMessagesMax));
1204 
1205     ASSERT_EQ(data, readData);
1206 }
1207 
1208 /*
1209  * Verify that a few bytes of data can be successfully written and read.
1210  */
1211 TYPED_TEST(UnsynchronizedReadWriteTest, SmallInputTest1) {
1212     const size_t dataLen = 16;
1213     ASSERT_LE(dataLen, this->mNumMessagesMax);
1214     uint8_t data[dataLen];
1215 
1216     initData(data, dataLen);
1217     ASSERT_TRUE(this->mQueue->write(data, dataLen));
1218     uint8_t readData[dataLen] = {};
1219     ASSERT_TRUE(this->mQueue->read(readData, dataLen));
1220     ASSERT_EQ(0, memcmp(data, readData, dataLen));
1221 }
1222 
1223 /*
1224  * Verify that read() returns false when trying to read from an empty queue.
1225  */
1226 TYPED_TEST(UnsynchronizedReadWriteTest, ReadWhenEmpty) {
1227     ASSERT_EQ(0UL, this->mQueue->availableToRead());
1228     const size_t dataLen = 2;
1229     ASSERT_TRUE(dataLen < this->mNumMessagesMax);
1230     uint8_t readData[dataLen];
1231     ASSERT_FALSE(this->mQueue->read(readData, dataLen));
1232 }
1233 
1234 /*
1235  * Write the queue until full. Verify that a subsequent write is successful.
1236  * Verify that availableToWrite() returns 0 as expected.
1237  */
1238 TYPED_TEST(UnsynchronizedReadWriteTest, WriteWhenFull1) {
1239     ASSERT_EQ(0UL, this->mQueue->availableToRead());
1240     std::vector<uint8_t> data(this->mNumMessagesMax);
1241 
1242     initData(&data[0], this->mNumMessagesMax);
1243     ASSERT_TRUE(this->mQueue->write(&data[0], this->mNumMessagesMax));
1244     ASSERT_EQ(0UL, this->mQueue->availableToWrite());
1245     ASSERT_TRUE(this->mQueue->write(&data[0], 1));
1246 
1247     std::vector<uint8_t> readData(this->mNumMessagesMax);
1248     ASSERT_FALSE(this->mQueue->read(&readData[0], this->mNumMessagesMax));
1249 }
1250 
1251 /*
1252  * Write the queue until full. Verify that a subsequent write
1253  * using beginWrite()/commitWrite() is successful.
1254  * Verify that the next read fails as expected for unsynchronized flavor.
1255  */
1256 TYPED_TEST(UnsynchronizedReadWriteTest, WriteWhenFull2) {
1257     ASSERT_EQ(0UL, this->mQueue->availableToRead());
1258     std::vector<uint8_t> data(this->mNumMessagesMax);
1259     ASSERT_TRUE(this->mQueue->write(&data[0], this->mNumMessagesMax));
1260 
1261     typename TypeParam::MQType::MemTransaction tx;
1262     ASSERT_TRUE(this->mQueue->beginWrite(1, &tx));
1263 
1264     ASSERT_EQ(tx.getFirstRegion().getLength(), 1U);
1265 
1266     ASSERT_TRUE(tx.copyTo(&data[0], 0 /* startIdx */));
1267 
1268     ASSERT_TRUE(this->mQueue->commitWrite(1));
1269 
1270     std::vector<uint8_t> readData(this->mNumMessagesMax);
1271     ASSERT_FALSE(this->mQueue->read(&readData[0], this->mNumMessagesMax));
1272 }
1273 
1274 /*
1275  * Write a chunk of data equal to the queue size.
1276  * Verify that the write is successful and the subsequent read
1277  * returns the expected data.
1278  */
1279 TYPED_TEST(UnsynchronizedReadWriteTest, LargeInputTest1) {
1280     std::vector<uint8_t> data(this->mNumMessagesMax);
1281     initData(&data[0], this->mNumMessagesMax);
1282     ASSERT_TRUE(this->mQueue->write(&data[0], this->mNumMessagesMax));
1283     std::vector<uint8_t> readData(this->mNumMessagesMax);
1284     ASSERT_TRUE(this->mQueue->read(&readData[0], this->mNumMessagesMax));
1285     ASSERT_EQ(data, readData);
1286 }
1287 
1288 /*
1289  * Attempt to write a chunk of data larger than the queue size.
1290  * Verify that it fails. Verify that a subsequent read fails and
1291  * the queue is still empty.
1292  */
TYPED_TEST(UnsynchronizedReadWriteTest,LargeInputTest2)1293 TYPED_TEST(UnsynchronizedReadWriteTest, LargeInputTest2) {
1294     ASSERT_EQ(0UL, this->mQueue->availableToRead());
1295     const size_t dataLen = 4096;
1296     ASSERT_GT(dataLen, this->mNumMessagesMax);
1297     std::vector<uint8_t> data(dataLen);
1298     initData(&data[0], dataLen);
1299     ASSERT_FALSE(this->mQueue->write(&data[0], dataLen));
1300     std::vector<uint8_t> readData(this->mNumMessagesMax);
1301     ASSERT_FALSE(this->mQueue->read(&readData[0], this->mNumMessagesMax));
1302     ASSERT_NE(data, readData);
1303     ASSERT_EQ(0UL, this->mQueue->availableToRead());
1304 }

/*
 * After the queue is full, try to write more data. Verify that
 * the attempt is successful. Verify that the read fails
 * as expected.
 */
TYPED_TEST(UnsynchronizedReadWriteTest, LargeInputTest3) {
    std::vector<uint8_t> data(this->mNumMessagesMax);
    initData(&data[0], this->mNumMessagesMax);
    ASSERT_TRUE(this->mQueue->write(&data[0], this->mNumMessagesMax));
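    // The write into the already-full queue succeeds for the unsynchronized
    // flavor, but it invalidates the unread data, so the read below fails.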
    ASSERT_TRUE(this->mQueue->write(&data[0], 1));
    std::vector<uint8_t> readData(this->mNumMessagesMax);
    ASSERT_FALSE(this->mQueue->read(&readData[0], this->mNumMessagesMax));
}

/*
 * Verify that multiple reads one after the other return expected data.
 */
TYPED_TEST(UnsynchronizedReadWriteTest, MultipleRead) {
    const size_t chunkSize = 100;
    const size_t chunkNum = 5;
    const size_t dataLen = chunkSize * chunkNum;
    ASSERT_LE(dataLen, this->mNumMessagesMax);
    uint8_t data[dataLen];
    initData(data, dataLen);
    ASSERT_TRUE(this->mQueue->write(data, dataLen));
    uint8_t readData[dataLen] = {};
    for (size_t i = 0; i < chunkNum; i++) {
        ASSERT_TRUE(this->mQueue->read(readData + i * chunkSize, chunkSize));
    }
    ASSERT_EQ(0, memcmp(readData, data, dataLen));
}

/*
 * Verify that multiple writes one after the other happen correctly.
 */
TYPED_TEST(UnsynchronizedReadWriteTest, MultipleWrite) {
    const size_t chunkSize = 100;
    const size_t chunkNum = 5;
    const size_t dataLen = chunkSize * chunkNum;
    ASSERT_LE(dataLen, this->mNumMessagesMax);
    uint8_t data[dataLen];

    initData(data, dataLen);
    for (size_t i = 0; i < chunkNum; i++) {
        ASSERT_TRUE(this->mQueue->write(data + i * chunkSize, chunkSize));
    }

    uint8_t readData[dataLen] = {};
    ASSERT_TRUE(this->mQueue->read(readData, dataLen));
    ASSERT_EQ(0, memcmp(readData, data, dataLen));
}

/*
 * Write mNumMessagesMax - 1 messages into the FMQ and read back the same.
 * Then write mNumMessagesMax messages into the queue. This will cause a
 * wrap around. Read and verify the data.
 */
TYPED_TEST(UnsynchronizedReadWriteTest, ReadWriteWrapAround) {
    size_t numMessages = this->mNumMessagesMax - 1;
    std::vector<uint8_t> data(this->mNumMessagesMax);
    std::vector<uint8_t> readData(this->mNumMessagesMax);

    initData(&data[0], this->mNumMessagesMax);
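    // Move the read/write positions forward so that the following
    // full-capacity write has to wrap around.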
    ASSERT_TRUE(this->mQueue->write(&data[0], numMessages));
    ASSERT_TRUE(this->mQueue->read(&readData[0], numMessages));
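    // This write of mNumMessagesMax messages causes the wrap around.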
    ASSERT_TRUE(this->mQueue->write(&data[0], this->mNumMessagesMax));
    ASSERT_TRUE(this->mQueue->read(&readData[0], this->mNumMessagesMax));
    ASSERT_EQ(data, readData);
}

/*
 * Attempt to read more than the maximum number of messages in the queue.
 */
TYPED_TEST(UnsynchronizedReadWriteTest, ReadMoreThanNumMessagesMaxFails) {
    // Fill the queue with data
    std::vector<uint8_t> data(this->mNumMessagesMax);
    initData(data.data(), data.size());
    ASSERT_TRUE(this->mQueue->write(data.data(), data.size()));

    // Attempt to read more than the maximum number of messages in the queue.
    std::vector<uint8_t> readData(this->mNumMessagesMax + 1);
    ASSERT_FALSE(this->mQueue->read(readData.data(), readData.size()));
}

/*
 * Write some data to the queue and attempt to read more than the available data.
 */
TYPED_TEST(UnsynchronizedReadWriteTest, ReadMoreThanAvailableToReadFails) {
    // Fill half of the queue with data.
    size_t dataLen = this->mNumMessagesMax / 2;
    std::vector<uint8_t> data(dataLen);
    initData(data.data(), data.size());
    ASSERT_TRUE(this->mQueue->write(data.data(), data.size()));

    // Attempt to read more than the available data.
    std::vector<uint8_t> readData(dataLen + 1);
    ASSERT_FALSE(this->mQueue->read(readData.data(), readData.size()));
}

/*
 * Ensure that the template specialization of MessageQueueBase to element types
 * other than MQErased exposes its static knowledge of element size.
 */
TEST(MessageQueueErasedTest, MQErasedCompiles) {
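    // For a typed queue the element size is known at compile time, so
    // getLengthInBytes() needs no runtime setup; this test only has to
    // compile and execute without crashing.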
    auto txn = AidlMessageQueueSync::MemRegion();
    txn.getLengthInBytes();
}

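// Implemented in Rust; the test below expects it to return 1 on success.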
extern "C" uint8_t fmq_rust_test(void);

/*
 * Test using the FMQ from Rust.
 */
TEST(RustInteropTest, Simple) {
    ASSERT_EQ(fmq_rust_test(), 1);
}

/*
 * Verifies that after a ring buffer overflow and a first failed attempt to read,
 * the whole ring buffer is available to read and the old values were discarded.
 */
TYPED_TEST(UnsynchronizedOverflowHistoryTest, ReadAfterOverflow) {
    std::vector<uint16_t> data(this->mNumMessagesMax);

    // Fill the queue with a monotonic pattern
    initData(&data[0], this->mNumMessagesMax);
    ASSERT_TRUE(this->mQueue->write(&data[0], this->mNumMessagesMax));

    // Write more data (the first element of the same data) to cause a wrap around
    ASSERT_TRUE(this->mQueue->write(&data[0], 1));

    // Attempt a read (this should fail due to how UnsynchronizedWrite works)
    uint16_t readDataPlaceholder;
    ASSERT_FALSE(this->mQueue->read(&readDataPlaceholder, 1));

    // Verify that 1/2 of the ring buffer is available to read
    ASSERT_EQ(this->mQueue->availableToRead(), this->mQueue->getQuantumCount() / 2);

    // The next read should succeed as the queue's read pointer was reset by the previous read.
    std::vector<uint16_t> readData(this->mQueue->availableToRead());
    ASSERT_TRUE(this->mQueue->read(readData.data(), readData.size()));

    // Verify that the tail of the data is preserved in history after the partial wrap around
    // and is followed by the new data.
    std::rotate(data.begin(), data.begin() + 1, data.end());

    // Compare in reverse to match the tail of the data with readData
    ASSERT_TRUE(std::equal(readData.rbegin(), readData.rend(), data.rbegin()));
}

/*
 * Verifies that after a ring buffer overflow between beginRead() and a failed commitRead(),
 * the whole ring buffer is available to read and the old values were discarded.
 */
TYPED_TEST(UnsynchronizedOverflowHistoryTest, CommitReadAfterOverflow) {
    std::vector<uint16_t> data(this->mNumMessagesMax);

    // Fill the queue with a monotonic pattern
    initData(&data[0], this->mNumMessagesMax);
    ASSERT_TRUE(this->mQueue->write(&data[0], this->mNumMessagesMax));

    typename TypeParam::MQType::MemTransaction tx;
    ASSERT_TRUE(this->mQueue->beginRead(this->mNumMessagesMax, &tx));

    // Write more data (the first element of the same data) to cause a wrap around
    ASSERT_TRUE(this->mQueue->write(&data[0], 1));

    // The attempt to commit the read should fail due to the ring buffer wrap around
    ASSERT_FALSE(this->mQueue->commitRead(this->mNumMessagesMax));

    // Verify that 1/2 of the ring buffer is available to read
    ASSERT_EQ(this->mQueue->availableToRead(), this->mQueue->getQuantumCount() / 2);

    // The next read should succeed as the queue's read pointer was reset by the previous commitRead.
    std::vector<uint16_t> readData(this->mQueue->availableToRead());
    ASSERT_TRUE(this->mQueue->read(readData.data(), readData.size()));

    // Verify that the tail of the data is preserved in history after the partial wrap around
    // and is followed by the new data.
    std::rotate(data.begin(), data.begin() + 1, data.end());
    ASSERT_TRUE(std::equal(readData.rbegin(), readData.rend(), data.rbegin()));
}

/*
 * Verifies that a queue of a single element will fail a read after a write overflow
 * and then recover.
 */
TYPED_TEST(UnsynchronizedOverflowHistoryTestSingleElement, ReadAfterOverflow) {
    constexpr uint16_t kValue = 4;
    std::vector<uint16_t> data = {kValue};

    // A single write/read works normally
    ASSERT_TRUE(this->mQueue->write(&data[0], 1));
    uint16_t readDataPlaceholder;
    ASSERT_TRUE(this->mQueue->read(&readDataPlaceholder, 1));
    EXPECT_EQ(readDataPlaceholder, kValue);

    // Write twice without reading to cause a wrap around
    ASSERT_TRUE(this->mQueue->write(&data[0], 1));
    ASSERT_TRUE(this->mQueue->write(&data[0], 1));

    // Attempt a read (this should fail due to how UnsynchronizedWrite works)
    ASSERT_FALSE(this->mQueue->read(&readDataPlaceholder, 1));

    // Subsequent writes/reads should work again
    ASSERT_TRUE(this->mQueue->write(&data[0], 1));
    ASSERT_TRUE(this->mQueue->read(&readDataPlaceholder, 1));
    EXPECT_EQ(readDataPlaceholder, kValue);
}