xref: /aosp_15_r20/external/pigweed/pw_ring_buffer/prefixed_entry_ring_buffer_test.cc (revision 61c4878ac05f98d0ceed94b57d316916de578985)
1 // Copyright 2020 The Pigweed Authors
2 //
3 // Licensed under the Apache License, Version 2.0 (the "License"); you may not
4 // use this file except in compliance with the License. You may obtain a copy of
5 // the License at
6 //
7 //     https://www.apache.org/licenses/LICENSE-2.0
8 //
9 // Unless required by applicable law or agreed to in writing, software
10 // distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
11 // WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
12 // License for the specific language governing permissions and limitations under
13 // the License.
14 
15 #include "pw_ring_buffer/prefixed_entry_ring_buffer.h"
16 
17 #include <array>
18 #include <cstddef>
19 #include <cstdint>
20 #include <cstring>
21 
22 #include "pw_assert/check.h"
23 #include "pw_containers/vector.h"
24 #include "pw_unit_test/framework.h"
25 #include "pw_varint/varint.h"
26 
27 using std::byte;
28 
29 namespace pw {
30 namespace ring_buffer {
31 namespace {
32 using Entry = PrefixedEntryRingBufferMulti::Entry;
33 using iterator = PrefixedEntryRingBufferMulti::iterator;
34 
// All operations on a ring buffer that has no backing storage must fail
// cleanly without touching the caller's state.
TEST(PrefixedEntryRingBuffer, NoBuffer) {
  PrefixedEntryRingBuffer ring(false);

  byte scratch[32];
  size_t bytes_out;

  EXPECT_EQ(ring.EntryCount(), 0u);
  // SetBuffer() rejects a null pointer and a zero-length span.
  EXPECT_EQ(ring.SetBuffer(span<byte>(static_cast<byte*>(nullptr), 10u)),
            Status::InvalidArgument());
  EXPECT_EQ(ring.SetBuffer(span(scratch, 0u)), Status::InvalidArgument());
  EXPECT_EQ(ring.FrontEntryDataSizeBytes(), 0u);

  // Without a buffer, every mutating or reading call reports
  // FailedPrecondition and the entry count stays zero.
  EXPECT_EQ(ring.PushBack(scratch), Status::FailedPrecondition());
  EXPECT_EQ(ring.EntryCount(), 0u);
  EXPECT_EQ(ring.PeekFront(scratch, &bytes_out), Status::FailedPrecondition());
  EXPECT_EQ(bytes_out, 0u);
  EXPECT_EQ(ring.EntryCount(), 0u);
  EXPECT_EQ(ring.PeekFrontWithPreamble(scratch, &bytes_out),
            Status::FailedPrecondition());
  EXPECT_EQ(bytes_out, 0u);
  EXPECT_EQ(ring.EntryCount(), 0u);
  EXPECT_EQ(ring.PopFront(), Status::FailedPrecondition());
  EXPECT_EQ(ring.EntryCount(), 0u);
}
59 
60 // Single entry to write/read/pop over and over again.
61 constexpr byte single_entry_data[] = {byte(1),
62                                       byte(2),
63                                       byte(3),
64                                       byte(4),
65                                       byte(5),
66                                       byte(6),
67                                       byte(7),
68                                       byte(8),
69                                       byte(9)};
70 constexpr size_t single_entry_total_size = sizeof(single_entry_data) + 1;
71 constexpr size_t single_entry_test_buffer_size =
72     (single_entry_total_size * 7) / 2;
73 
74 // Make sure the single_entry_size is even so single_entry_buffer_Size gets the
75 // proper wrap/even behavior when getting to the end of the buffer.
76 static_assert((single_entry_total_size % 2) == 0u);
77 constexpr size_t kSingleEntryCycles = 300u;
78 
79 // Repeatedly write the same data, read it, and pop it, done over and over
80 // again.
SingleEntryWriteReadTest(bool user_data)81 void SingleEntryWriteReadTest(bool user_data) {
82   PrefixedEntryRingBuffer ring(user_data);
83   byte test_buffer[single_entry_test_buffer_size];
84 
85   byte read_buffer[single_entry_total_size];
86 
87   // Set read_size to an unexpected value to make sure result checks don't luck
88   // out and happen to see a previous value.
89   size_t read_size = 500U;
90   uint32_t user_preamble = 0U;
91 
92   EXPECT_EQ(ring.SetBuffer(test_buffer), OkStatus());
93 
94   EXPECT_EQ(ring.EntryCount(), 0u);
95   EXPECT_EQ(ring.EntriesSize(), 0u);
96   EXPECT_EQ(ring.PopFront(), Status::OutOfRange());
97   EXPECT_EQ(ring.EntryCount(), 0u);
98   EXPECT_EQ(ring.EntriesSize(), 0u);
99   EXPECT_EQ(ring.PushBack(span(single_entry_data, sizeof(test_buffer) + 5)),
100             Status::OutOfRange());
101   EXPECT_EQ(ring.EntryCount(), 0u);
102   EXPECT_EQ(ring.EntriesSize(), 0u);
103   EXPECT_EQ(ring.PeekFront(read_buffer, &read_size), Status::OutOfRange());
104   EXPECT_EQ(read_size, 0u);
105   read_size = 500U;
106   EXPECT_EQ(ring.PeekFrontWithPreamble(read_buffer, &read_size),
107             Status::OutOfRange());
108   EXPECT_EQ(read_size, 0u);
109 
110   size_t user_preamble_bytes = (user_data ? 1 : 0);
111   size_t data_size = sizeof(single_entry_data) - user_preamble_bytes;
112   size_t data_offset = single_entry_total_size - data_size;
113 
114   byte expect_buffer[single_entry_total_size] = {};
115   expect_buffer[user_preamble_bytes] = byte(data_size);
116   memcpy(expect_buffer + data_offset, single_entry_data, data_size);
117 
118   for (size_t i = 0; i < kSingleEntryCycles; i++) {
119     ASSERT_EQ(ring.FrontEntryDataSizeBytes(), 0u);
120     ASSERT_EQ(ring.FrontEntryTotalSizeBytes(), 0u);
121 
122     // Limit the value of the preamble to a single byte, to ensure that we
123     // retain a static `single_entry_buffer_size` during the test. Single
124     // bytes are varint-encoded to the same value.
125     uint32_t preamble_byte = i % 128;
126     ASSERT_EQ(ring.PushBack(span(single_entry_data, data_size), preamble_byte),
127               OkStatus());
128     ASSERT_EQ(ring.EntriesSize(),
129               sizeof(single_entry_data) +
130                   varint::EncodedSize(sizeof(single_entry_data)));
131     ASSERT_EQ(ring.EntryCount(), 1u);
132     ASSERT_EQ(ring.FrontEntryDataSizeBytes(), data_size);
133     ASSERT_EQ(ring.FrontEntryTotalSizeBytes(), single_entry_total_size);
134 
135     read_size = 500U;
136     ASSERT_EQ(ring.PeekFront(read_buffer, &read_size), OkStatus());
137     ASSERT_EQ(read_size, data_size);
138 
139     // ASSERT_THAT(span(expect_buffer).last(data_size),
140     //            testing::ElementsAreArray(span(read_buffer, data_size)));
141     ASSERT_EQ(
142         memcmp(
143             span(expect_buffer).last(data_size).data(), read_buffer, data_size),
144         0);
145 
146     read_size = 500U;
147     ASSERT_EQ(ring.PeekFrontWithPreamble(read_buffer, &read_size), OkStatus());
148     ASSERT_EQ(read_size, single_entry_total_size);
149 
150     if (user_data) {
151       expect_buffer[0] = byte(preamble_byte);
152     }
153 
154     // ASSERT_THAT(span(expect_buffer),
155     //            testing::ElementsAreArray(span(read_buffer)));
156     ASSERT_EQ(memcmp(expect_buffer, read_buffer, single_entry_total_size), 0);
157 
158     if (user_data) {
159       user_preamble = 0U;
160       ASSERT_EQ(
161           ring.PeekFrontWithPreamble(read_buffer, user_preamble, read_size),
162           OkStatus());
163       ASSERT_EQ(read_size, data_size);
164       ASSERT_EQ(user_preamble, preamble_byte);
165       ASSERT_EQ(memcmp(span(expect_buffer).last(data_size).data(),
166                        read_buffer,
167                        data_size),
168                 0);
169     }
170 
171     ASSERT_EQ(ring.PopFront(), OkStatus());
172     ASSERT_EQ(ring.EntriesSize(), 0u);
173     ASSERT_EQ(ring.EntryCount(), 0u);
174   }
175 }
176 
// Run the single-entry cycle with and without the user-preamble feature.
TEST(PrefixedEntryRingBuffer, SingleEntryWriteReadNoUserData) {
  SingleEntryWriteReadTest(false);
}

TEST(PrefixedEntryRingBuffer, SingleEntryWriteReadYesUserData) {
  SingleEntryWriteReadTest(true);
}
184 
185 // TODO: b/234883746 - Increase this to 5000 once we have a way to detect
186 // targets with more computation and memory oomph.
187 constexpr size_t kOuterCycles = 50u;
188 constexpr size_t kCountingUpMaxExpectedEntries =
189     single_entry_test_buffer_size / single_entry_total_size;
190 
191 // Write data that is filled with a byte value that increments each write. Write
192 // many times without read/pop and then check to make sure correct contents are
193 // in the ring buffer.
194 template <bool kUserData>
CountingUpWriteReadTest()195 void CountingUpWriteReadTest() {
196   PrefixedEntryRingBuffer ring(kUserData);
197   byte test_buffer[single_entry_test_buffer_size];
198 
199   EXPECT_EQ(ring.SetBuffer(test_buffer), OkStatus());
200   EXPECT_EQ(ring.EntryCount(), 0u);
201 
202   constexpr size_t kDataSize = sizeof(single_entry_data) - (kUserData ? 1 : 0);
203 
204   for (size_t i = 0; i < kOuterCycles; i++) {
205     size_t seed = i;
206 
207     byte write_buffer[kDataSize];
208 
209     size_t j;
210     for (j = 0; j < kSingleEntryCycles; j++) {
211       memset(write_buffer, j + seed, sizeof(write_buffer));
212 
213       ASSERT_EQ(ring.PushBack(write_buffer), OkStatus());
214 
215       size_t expected_count = (j < kCountingUpMaxExpectedEntries)
216                                   ? j + 1
217                                   : kCountingUpMaxExpectedEntries;
218       ASSERT_EQ(ring.EntryCount(), expected_count);
219     }
220     size_t final_write_j = j;
221     size_t fill_val = seed + final_write_j - kCountingUpMaxExpectedEntries;
222 
223     for (j = 0; j < kCountingUpMaxExpectedEntries; j++) {
224       byte read_buffer[sizeof(write_buffer)];
225       size_t read_size;
226       memset(write_buffer, fill_val + j, sizeof(write_buffer));
227       ASSERT_EQ(ring.PeekFront(read_buffer, &read_size), OkStatus());
228 
229       ASSERT_EQ(memcmp(write_buffer, read_buffer, kDataSize), 0);
230 
231       ASSERT_EQ(ring.PopFront(), OkStatus());
232     }
233   }
234 }
235 
// Run the counting-up cycle with and without the user-preamble feature.
TEST(PrefixedEntryRingBuffer, CountingUpWriteReadNoUserData) {
  CountingUpWriteReadTest<false>();
}

TEST(PrefixedEntryRingBuffer, CountingUpWriteReadYesUserData) {
  CountingUpWriteReadTest<true>();
}
243 
244 // Create statically to prevent allocating a capture in the lambda below.
245 static pw::Vector<byte, single_entry_total_size> read_buffer;
246 
247 // Repeatedly write the same data, read it, and pop it, done over and over
248 // again.
SingleEntryWriteReadWithSectionWriterTest(bool user_data)249 void SingleEntryWriteReadWithSectionWriterTest(bool user_data) {
250   PrefixedEntryRingBuffer ring(user_data);
251   byte test_buffer[single_entry_test_buffer_size];
252 
253   EXPECT_EQ(ring.SetBuffer(test_buffer), OkStatus());
254 
255   auto output = [](span<const byte> src) -> Status {
256     for (byte b : src) {
257       read_buffer.push_back(b);
258     }
259     return OkStatus();
260   };
261 
262   size_t user_preamble_bytes = (user_data ? 1 : 0);
263   size_t data_size = sizeof(single_entry_data) - user_preamble_bytes;
264   size_t data_offset = single_entry_total_size - data_size;
265 
266   byte expect_buffer[single_entry_total_size] = {};
267   expect_buffer[user_preamble_bytes] = byte(data_size);
268   memcpy(expect_buffer + data_offset, single_entry_data, data_size);
269 
270   for (size_t i = 0; i < kSingleEntryCycles; i++) {
271     ASSERT_EQ(ring.FrontEntryDataSizeBytes(), 0u);
272     ASSERT_EQ(ring.FrontEntryTotalSizeBytes(), 0u);
273 
274     // Limit the value of the preamble to a single byte, to ensure that we
275     // retain a static `single_entry_buffer_size` during the test. Single
276     // bytes are varint-encoded to the same value.
277     uint32_t preamble_byte = i % 128;
278     ASSERT_EQ(ring.PushBack(span(single_entry_data, data_size), preamble_byte),
279               OkStatus());
280     ASSERT_EQ(ring.FrontEntryDataSizeBytes(), data_size);
281     ASSERT_EQ(ring.FrontEntryTotalSizeBytes(), single_entry_total_size);
282 
283     read_buffer.clear();
284     ASSERT_EQ(ring.PeekFront(output), OkStatus());
285     ASSERT_EQ(read_buffer.size(), data_size);
286 
287     ASSERT_EQ(memcmp(span(expect_buffer).last(data_size).data(),
288                      read_buffer.data(),
289                      data_size),
290               0);
291 
292     read_buffer.clear();
293     ASSERT_EQ(ring.PeekFrontWithPreamble(output), OkStatus());
294     ASSERT_EQ(read_buffer.size(), single_entry_total_size);
295     ASSERT_EQ(ring.PopFront(), OkStatus());
296 
297     if (user_data) {
298       expect_buffer[0] = byte(preamble_byte);
299     }
300 
301     ASSERT_EQ(
302         memcmp(expect_buffer, read_buffer.data(), single_entry_total_size), 0);
303   }
304 }
305 
// Run the section-writer cycle with and without the user-preamble feature.
TEST(PrefixedEntryRingBuffer, SingleEntryWriteReadWithSectionWriterNoUserData) {
  SingleEntryWriteReadWithSectionWriterTest(false);
}

TEST(PrefixedEntryRingBuffer,
     SingleEntryWriteReadWithSectionWriterYesUserData) {
  SingleEntryWriteReadWithSectionWriterTest(true);
}
314 
315 constexpr size_t kEntrySizeBytes = 8u;
316 constexpr size_t kTotalEntryCount = 20u;
317 constexpr size_t kBufferExtraBytes = 5u;
318 constexpr size_t kTestBufferSize =
319     (kEntrySizeBytes * kTotalEntryCount) + kBufferExtraBytes;
320 
321 // Create statically to prevent allocating a capture in the lambda below.
322 static pw::Vector<byte, kTestBufferSize> actual_result;
323 
DeringTest(bool preload)324 void DeringTest(bool preload) {
325   PrefixedEntryRingBuffer ring;
326 
327   byte test_buffer[kTestBufferSize];
328   EXPECT_EQ(ring.SetBuffer(test_buffer), OkStatus());
329 
330   // Entry data is entry size - preamble (single byte in this case).
331   byte single_entry_buffer[kEntrySizeBytes - 1u];
332   auto entry_data = span(single_entry_buffer);
333   size_t i;
334 
335   // TODO: b/234883746 - Increase this to 500 once we have a way to detect
336   // targets with more computation and memory oomph.
337   size_t loop_goal = preload ? 50 : 1;
338 
339   for (size_t main_loop_count = 0; main_loop_count < loop_goal;
340        main_loop_count++) {
341     if (preload) {
342       // Prime the ringbuffer with some junk data to get the buffer
343       // wrapped.
344       for (i = 0; i < (kTotalEntryCount * (main_loop_count % 64u)); i++) {
345         memset(single_entry_buffer, i, sizeof(single_entry_buffer));
346         ASSERT_EQ(OkStatus(), ring.PushBack(single_entry_buffer));
347       }
348     }
349 
350     // Build up the expected buffer and fill the ring buffer with the test data.
351     pw::Vector<byte, kTestBufferSize> expected_result;
352     for (i = 0; i < kTotalEntryCount; i++) {
353       // First component of the entry: the varint size.
354       static_assert(sizeof(single_entry_buffer) < 127);
355       expected_result.push_back(byte(sizeof(single_entry_buffer)));
356 
357       // Second component of the entry: the raw data.
358       memset(single_entry_buffer, 'a' + i, sizeof(single_entry_buffer));
359       for (byte b : entry_data) {
360         expected_result.push_back(b);
361       }
362 
363       // The ring buffer internally pushes the varint size byte.
364       ASSERT_EQ(OkStatus(), ring.PushBack(single_entry_buffer));
365     }
366 
367     // Check values before doing the dering.
368     EXPECT_EQ(ring.EntryCount(), kTotalEntryCount);
369     EXPECT_EQ(expected_result.size(), ring.TotalUsedBytes());
370 
371     ASSERT_EQ(ring.Dering(), OkStatus());
372 
373     // Check values after doing the dering.
374     EXPECT_EQ(ring.EntryCount(), kTotalEntryCount);
375     EXPECT_EQ(expected_result.size(), ring.TotalUsedBytes());
376 
377     // Read out the entries of the ring buffer.
378     actual_result.clear();
379     auto output = [](span<const byte> src) -> Status {
380       for (byte b : src) {
381         actual_result.push_back(b);
382       }
383       return OkStatus();
384     };
385     while (ring.EntryCount()) {
386       ASSERT_EQ(ring.PeekFrontWithPreamble(output), OkStatus());
387       ASSERT_EQ(ring.PopFront(), OkStatus());
388     }
389 
390     // Ensure the actual result out of the ring buffer matches our manually
391     // computed result.
392     EXPECT_EQ(expected_result.size(), actual_result.size());
393     ASSERT_EQ(memcmp(test_buffer, actual_result.data(), actual_result.size()),
394               0);
395     ASSERT_EQ(
396         memcmp(
397             expected_result.data(), actual_result.data(), actual_result.size()),
398         0);
399   }
400 }
401 
// Exercise Dering() with and without pre-wrapping the buffer.
TEST(PrefixedEntryRingBuffer, Dering) { DeringTest(true); }
TEST(PrefixedEntryRingBuffer, DeringNoPreload) { DeringTest(false); }
404 
405 template <typename T>
PushBack(PrefixedEntryRingBufferMulti & ring,T element,uint32_t user_preamble=0)406 Status PushBack(PrefixedEntryRingBufferMulti& ring,
407                 T element,
408                 uint32_t user_preamble = 0) {
409   union {
410     std::array<byte, sizeof(element)> buffer;
411     T item;
412   } aliased;
413   aliased.item = element;
414   return ring.PushBack(aliased.buffer, user_preamble);
415 }
416 
417 template <typename T>
TryPushBack(PrefixedEntryRingBufferMulti & ring,T element,uint32_t user_preamble=0)418 Status TryPushBack(PrefixedEntryRingBufferMulti& ring,
419                    T element,
420                    uint32_t user_preamble = 0) {
421   union {
422     std::array<byte, sizeof(element)> buffer;
423     T item;
424   } aliased;
425   aliased.item = element;
426   return ring.TryPushBack(aliased.buffer, user_preamble);
427 }
428 
429 template <typename T>
PeekFront(PrefixedEntryRingBufferMulti::Reader & reader,uint32_t * user_preamble_out=nullptr)430 T PeekFront(PrefixedEntryRingBufferMulti::Reader& reader,
431             uint32_t* user_preamble_out = nullptr) {
432   union {
433     std::array<byte, sizeof(T)> buffer;
434     T item;
435   } aliased;
436   size_t bytes_read = 0;
437   uint32_t user_preamble = 0;
438   PW_CHECK_OK(
439       reader.PeekFrontWithPreamble(aliased.buffer, user_preamble, bytes_read));
440   PW_CHECK_INT_EQ(bytes_read, sizeof(T));
441   if (user_preamble_out) {
442     *user_preamble_out = user_preamble;
443   }
444   return aliased.item;
445 }
446 
447 template <typename T>
GetEntry(span<const std::byte> lhs)448 T GetEntry(span<const std::byte> lhs) {
449   union {
450     std::array<byte, sizeof(T)> buffer;
451     T item;
452   } aliased;
453   std::memcpy(aliased.buffer.data(), lhs.data(), lhs.size_bytes());
454   return aliased.item;
455 }
456 
EmptyDataPushBackTest(bool user_data)457 void EmptyDataPushBackTest(bool user_data) {
458   PrefixedEntryRingBuffer ring(user_data);
459   byte test_buffer[kTestBufferSize];
460   EXPECT_EQ(ring.SetBuffer(test_buffer), OkStatus());
461 
462   // Push back an empty span and a non-empty span.
463   EXPECT_EQ(ring.PushBack(span<std::byte>(), 1u), OkStatus());
464   EXPECT_EQ(ring.EntryCount(), 1u);
465   EXPECT_EQ(ring.PushBack(single_entry_data, 2u), OkStatus());
466   EXPECT_EQ(ring.EntryCount(), 2u);
467 
468   // Confirm that both entries can be read back.
469   byte entry_buffer[kTestBufferSize];
470   uint32_t user_preamble = 0;
471   size_t bytes_read = 0;
472   // Read empty span.
473   EXPECT_EQ(ring.PeekFrontWithPreamble(entry_buffer, user_preamble, bytes_read),
474             OkStatus());
475   EXPECT_EQ(user_preamble, user_data ? 1u : 0u);
476   EXPECT_EQ(bytes_read, 0u);
477   EXPECT_EQ(ring.PopFront(), OkStatus());
478   EXPECT_EQ(ring.EntryCount(), 1u);
479   // Read non-empty span.
480   EXPECT_EQ(ring.PeekFrontWithPreamble(entry_buffer, user_preamble, bytes_read),
481             OkStatus());
482   EXPECT_EQ(user_preamble, user_data ? 2u : 0u);
483   ASSERT_EQ(bytes_read, sizeof(single_entry_data));
484   EXPECT_EQ(memcmp(entry_buffer, single_entry_data, bytes_read), 0);
485   EXPECT_EQ(ring.PopFront(), OkStatus());
486   EXPECT_EQ(ring.EntryCount(), 0u);
487 }
488 
// Run the empty-entry cycle with and without the user-preamble feature.
TEST(PrefixedEntryRingBuffer, EmptyDataPushBackTestWithPreamble) {
  EmptyDataPushBackTest(true);
}
TEST(PrefixedEntryRingBuffer, EmptyDataPushBackTestNoPreamble) {
  EmptyDataPushBackTest(false);
}
495 
// TryPushBack() must refuse to overwrite the oldest entry once the buffer is
// full, while PushBack() evicts old entries to make room.
TEST(PrefixedEntryRingBuffer, TryPushBack) {
  PrefixedEntryRingBuffer ring;
  byte ring_storage[kTestBufferSize];
  EXPECT_EQ(ring.SetBuffer(ring_storage), OkStatus());

  // Fill the buffer to capacity with a constant value.
  int total_items = 0;
  while (true) {
    const Status push_status = TryPushBack<int>(ring, 5);
    if (!push_status.ok()) {
      EXPECT_EQ(push_status, Status::ResourceExhausted());
      break;
    }
    total_items++;
  }
  EXPECT_EQ(PeekFront<int>(ring), 5);

  // Once full, TryPushBack() never disturbs the oldest entry.
  for (int i = 0; i < total_items; ++i) {
    EXPECT_EQ(TryPushBack<int>(ring, 100), Status::ResourceExhausted());
    EXPECT_EQ(PeekFront<int>(ring), 5);
  }

  // PushBack(), by contrast, overwrites the old entries.
  for (int i = 0; i < total_items; ++i) {
    EXPECT_EQ(PushBack<int>(ring, 100), OkStatus());
  }
  EXPECT_EQ(PeekFront<int>(ring), 100);
}
526 
// Forward iteration must visit every entry, in insertion order, with an OK
// status on each.
TEST(PrefixedEntryRingBuffer, Iterator) {
  PrefixedEntryRingBuffer ring;
  byte ring_storage[kTestBufferSize];
  EXPECT_EQ(ring.SetBuffer(ring_storage), OkStatus());

  // Fill the buffer with increasing values.
  size_t entry_count = 0;
  while (TryPushBack<size_t>(ring, entry_count).ok()) {
    entry_count++;
  }

  // Every iterated entry carries its expected payload.
  size_t validated_entries = 0;
  for (Result<const Entry> entry_info : ring) {
    EXPECT_TRUE(entry_info.status().ok());
    EXPECT_EQ(GetEntry<size_t>(entry_info.value().buffer), validated_entries);
    validated_entries++;
  }
  EXPECT_EQ(validated_entries, entry_count);
}
547 
// Decrementing an iterator must walk backwards through the entries, and
// decrementing past begin() must land on end() with a DataLoss status.
TEST(PrefixedEntryRingBuffer, IteratorDecrement) {
  PrefixedEntryRingBuffer ring;
  byte ring_storage[kTestBufferSize];
  EXPECT_EQ(ring.SetBuffer(ring_storage), OkStatus());

  // Fill the buffer with increasing values.
  size_t entry_count = 0;
  while (TryPushBack<size_t>(ring, entry_count).ok()) {
    entry_count++;
  }

  // Advance an iterator to the final entry.
  iterator begin_it = ring.begin();
  iterator it = ring.begin();
  for (size_t step = 0; step < entry_count - 1; ++step) {
    it++;
  }

  // Walk back to the first entry, validating each payload along the way.
  size_t expected_value = entry_count - 1;
  do {
    EXPECT_TRUE(it.status().ok());
    EXPECT_EQ(GetEntry<size_t>(it->buffer), expected_value);
    it--;
    expected_value--;
  } while (it != begin_it);

  EXPECT_EQ(expected_value, static_cast<size_t>(0));

  // One more decrement falls off the front of the buffer.
  --it;
  EXPECT_EQ(ring.end(), it);
  EXPECT_EQ(Status::DataLoss(), it.status());
}
581 
// EntriesSize() must equal the full buffer size when the buffer is exactly
// 100% full, both before and after an overwriting push.
TEST(PrefixedEntryRingBuffer, EntriesSizeWhenBufferFull) {
  PrefixedEntryRingBuffer ring;

  // One entry's footprint: payload plus varint-encoded length prefix.
  constexpr size_t kSingleEntryInternalSize =
      sizeof(single_entry_data) +
      varint::EncodedSize(sizeof(single_entry_data));

  // Size the backing store as an exact multiple of the entry footprint so it
  // can be filled completely.
  std::array<std::byte, kSingleEntryInternalSize> ring_storage;
  ASSERT_EQ(ring.SetBuffer(ring_storage), OkStatus());

  // Fill the buffer to capacity.
  while (true) {
    const Status push_status = ring.TryPushBack(single_entry_data);
    ASSERT_EQ(ring.TotalUsedBytes(),
              ring.EntryCount() * kSingleEntryInternalSize);
    if (!push_status.ok()) {
      EXPECT_EQ(push_status, Status::ResourceExhausted());
      break;
    }
  }
  ASSERT_EQ(ring.TotalUsedBytes(), ring.TotalSizeBytes());
  EXPECT_EQ(ring.EntriesSize(), ring.TotalSizeBytes());

  // An overwriting push keeps the buffer exactly full.
  EXPECT_EQ(ring.PushBack(single_entry_data), OkStatus());
  EXPECT_EQ(ring.EntriesSize(), ring.TotalSizeBytes());
}
610 
TEST(PrefixedEntryRingBufferMulti,TryPushBack)611 TEST(PrefixedEntryRingBufferMulti, TryPushBack) {
612   PrefixedEntryRingBufferMulti ring;
613   byte test_buffer[kTestBufferSize];
614   EXPECT_EQ(ring.SetBuffer(test_buffer), OkStatus());
615 
616   PrefixedEntryRingBufferMulti::Reader fast_reader;
617   PrefixedEntryRingBufferMulti::Reader slow_reader;
618 
619   EXPECT_EQ(ring.AttachReader(fast_reader), OkStatus());
620   EXPECT_EQ(ring.AttachReader(slow_reader), OkStatus());
621 
622   // Fill up the ring buffer with an increasing count.
623   int total_items = 0;
624   while (true) {
625     Status status = TryPushBack<int>(ring, total_items);
626     if (status.ok()) {
627       total_items++;
628     } else {
629       EXPECT_EQ(status, Status::ResourceExhausted());
630       break;
631     }
632   }
633 
634   EXPECT_EQ(fast_reader.EntriesSize(), ring.TotalUsedBytes());
635   EXPECT_EQ(slow_reader.EntriesSize(), ring.TotalUsedBytes());
636 
637   // Run fast reader twice as fast as the slow reader.
638   size_t total_used_bytes = ring.TotalUsedBytes();
639   for (int i = 0; i < total_items; ++i) {
640     EXPECT_EQ(PeekFront<int>(fast_reader), i);
641     EXPECT_EQ(fast_reader.PopFront(), OkStatus());
642     EXPECT_EQ(ring.TotalUsedBytes(), total_used_bytes);
643     if (i % 2 == 0) {
644       EXPECT_EQ(PeekFront<int>(slow_reader), i / 2);
645       EXPECT_EQ(slow_reader.PopFront(), OkStatus());
646       EXPECT_TRUE(ring.TotalUsedBytes() < total_used_bytes);
647     }
648     total_used_bytes = ring.TotalUsedBytes();
649   }
650   EXPECT_EQ(fast_reader.PopFront(), Status::OutOfRange());
651   EXPECT_EQ(fast_reader.EntriesSize(), 0u);
652   EXPECT_EQ(slow_reader.EntriesSize(), ring.TotalUsedBytes());
653   EXPECT_TRUE(ring.TotalUsedBytes() > 0u);
654 
655   // Fill the buffer again, expect that the fast reader
656   // only sees half the entries as the slow reader.
657   size_t max_items = total_items;
658   const size_t total_used_bytes_before_pushing = total_used_bytes;
659   while (true) {
660     Status status = TryPushBack<int>(ring, total_items);
661     if (status.ok()) {
662       total_items++;
663     } else {
664       EXPECT_EQ(status, Status::ResourceExhausted());
665       break;
666     }
667   }
668   EXPECT_EQ(slow_reader.EntryCount(), max_items);
669   EXPECT_EQ(slow_reader.EntriesSize(), ring.TotalUsedBytes());
670   EXPECT_EQ(fast_reader.EntryCount(), total_items - max_items);
671   // Fast reader pops all the entries before the second push.
672   EXPECT_EQ(fast_reader.EntriesSize(),
673             ring.TotalUsedBytes() - total_used_bytes_before_pushing);
674 
675   for (int i = total_items - max_items; i < total_items; ++i) {
676     EXPECT_EQ(PeekFront<int>(slow_reader), i);
677     EXPECT_EQ(slow_reader.PopFront(), OkStatus());
678     if (static_cast<size_t>(i) >= max_items) {
679       EXPECT_EQ(PeekFront<int>(fast_reader), i);
680       EXPECT_EQ(fast_reader.PopFront(), OkStatus());
681     }
682   }
683   EXPECT_EQ(slow_reader.PopFront(), Status::OutOfRange());
684   EXPECT_EQ(slow_reader.EntriesSize(), 0u);
685   EXPECT_EQ(fast_reader.PopFront(), Status::OutOfRange());
686   EXPECT_EQ(fast_reader.EntriesSize(), 0u);
687   EXPECT_EQ(ring.TotalUsedBytes(), 0u);
688 }
689 
// An overwriting PushBack() must advance every attached reader past the
// evicted entries.
TEST(PrefixedEntryRingBufferMulti, PushBack) {
  PrefixedEntryRingBufferMulti ring;
  byte ring_storage[kTestBufferSize];
  EXPECT_EQ(ring.SetBuffer(ring_storage), OkStatus());

  PrefixedEntryRingBufferMulti::Reader fast_reader;
  PrefixedEntryRingBufferMulti::Reader slow_reader;

  EXPECT_EQ(ring.AttachReader(fast_reader), OkStatus());
  EXPECT_EQ(ring.AttachReader(slow_reader), OkStatus());

  // Fill the buffer with an increasing count until full.
  size_t total_items = 0;
  while (true) {
    const Status push_status = TryPushBack<uint32_t>(ring, total_items);
    if (!push_status.ok()) {
      EXPECT_EQ(push_status, Status::ResourceExhausted());
      break;
    }
    total_items++;
  }
  EXPECT_EQ(slow_reader.EntryCount(), total_items);

  // Pop one entry with the fast reader, then force an overwriting push of an
  // entry guaranteed to be larger than a single existing entry
  // (uint64_t entry > uint32_t entry), and confirm both readers moved.
  EXPECT_EQ(fast_reader.PopFront(), OkStatus());
  EXPECT_EQ(PushBack<uint64_t>(ring, 5u), OkStatus());
  // Values 0 and 1 have been consumed or evicted.
  EXPECT_EQ(PeekFront<uint32_t>(slow_reader), 2u);
  EXPECT_EQ(PeekFront<uint32_t>(fast_reader), 2u);
  // Each reader lost two entries but gained one.
  EXPECT_EQ(slow_reader.EntryCount(), total_items - 1);
  EXPECT_EQ(fast_reader.EntryCount(), total_items - 1);
}
728 
// A reader attached after entries were pushed must still observe all of
// them, and detaching/re-attaching must rewind it to the oldest entry.
TEST(PrefixedEntryRingBufferMulti, ReaderAddRemove) {
  PrefixedEntryRingBufferMulti ring;
  byte ring_storage[kTestBufferSize];
  EXPECT_EQ(ring.SetBuffer(ring_storage), OkStatus());

  PrefixedEntryRingBufferMulti::Reader reader;
  PrefixedEntryRingBufferMulti::Reader transient_reader;

  EXPECT_EQ(ring.AttachReader(reader), OkStatus());

  // Fill the buffer with increasing values until full.
  size_t total_items = 0;
  while (true) {
    const Status push_status = TryPushBack<size_t>(ring, total_items);
    if (!push_status.ok()) {
      EXPECT_EQ(push_status, Status::ResourceExhausted());
      break;
    }
    total_items++;
  }
  EXPECT_EQ(reader.EntryCount(), total_items);

  // Attach a second reader after the buffer was filled; it starts at the
  // oldest available entry.
  EXPECT_EQ(ring.AttachReader(transient_reader), OkStatus());
  EXPECT_EQ(transient_reader.EntryCount(), total_items);
  EXPECT_EQ(transient_reader.EntriesSize(), ring.TotalUsedBytes());

  // The late-attached reader observes every value.
  for (size_t value = 0; value < total_items; value++) {
    EXPECT_EQ(PeekFront<size_t>(transient_reader), value);
    EXPECT_EQ(transient_reader.PopFront(), OkStatus());
  }
  EXPECT_EQ(transient_reader.EntryCount(), 0u);
  EXPECT_EQ(transient_reader.EntriesSize(), 0u);

  // Detach and re-attach: the reader is rewound to the oldest entry still
  // held on behalf of other readers.
  EXPECT_EQ(ring.DetachReader(transient_reader), OkStatus());
  EXPECT_EQ(ring.AttachReader(transient_reader), OkStatus());
  EXPECT_EQ(transient_reader.EntryCount(), total_items);
  EXPECT_EQ(transient_reader.EntriesSize(), ring.TotalUsedBytes());

  for (size_t value = 0; value < total_items; value++) {
    EXPECT_EQ(PeekFront<size_t>(transient_reader), value);
    EXPECT_EQ(transient_reader.PopFront(), OkStatus());
  }
  EXPECT_EQ(transient_reader.EntryCount(), 0u);
  EXPECT_EQ(transient_reader.EntriesSize(), 0u);
}
780 
// A reader may be attached to at most one ring buffer at a time; a second
// attach is rejected until the reader is detached from the first buffer.
TEST(PrefixedEntryRingBufferMulti, SingleBufferPerReader) {
  PrefixedEntryRingBufferMulti ring_one;
  PrefixedEntryRingBufferMulti ring_two;
  byte ring_storage[kTestBufferSize];
  EXPECT_EQ(ring_one.SetBuffer(ring_storage), OkStatus());

  PrefixedEntryRingBufferMulti::Reader reader;
  EXPECT_EQ(ring_one.AttachReader(reader), OkStatus());
  EXPECT_EQ(ring_two.AttachReader(reader), Status::InvalidArgument());

  // After detaching, the reader may be attached elsewhere — but again only
  // to a single buffer.
  EXPECT_EQ(ring_one.DetachReader(reader), OkStatus());
  EXPECT_EQ(ring_two.AttachReader(reader), OkStatus());
  EXPECT_EQ(ring_one.AttachReader(reader), Status::InvalidArgument());
}
795 
TEST(PrefixedEntryRingBufferMulti, IteratorEmptyBuffer) {
  // A buffer too small to hold even one entry must iterate as empty:
  // begin() compares equal to end() immediately.
  PrefixedEntryRingBufferMulti ring;
  // One byte cannot contain any valid entry (preamble alone needs more).
  byte tiny_buffer[1] = {byte{0xFF}};

  PrefixedEntryRingBufferMulti::Reader reader;
  EXPECT_EQ(ring.AttachReader(reader), OkStatus());
  EXPECT_EQ(ring.SetBuffer(tiny_buffer), OkStatus());

  EXPECT_EQ(ring.begin(), ring.end());
}
807 
TEST(PrefixedEntryRingBufferMulti, IteratorValidEntries) {
  // Exercises iteration over a buffer that holds only valid, unread entries,
  // i.e. the state right after filling: [VALID|VALID|VALID|INVALID].
  PrefixedEntryRingBufferMulti ring;
  byte test_buffer[kTestBufferSize];
  EXPECT_EQ(ring.SetBuffer(test_buffer), OkStatus());

  PrefixedEntryRingBufferMulti::Reader reader;
  EXPECT_EQ(ring.AttachReader(reader), OkStatus());

  // Push incrementing values until the buffer refuses another entry.
  size_t pushed = 0;
  for (; TryPushBack<size_t>(ring, pushed).ok(); ++pushed) {
  }

  // Walk the ring and confirm each entry's payload matches its push order.
  size_t seen = 0;
  for (const Entry& entry : ring) {
    EXPECT_EQ(GetEntry<size_t>(entry.buffer), seen);
    ++seen;
  }
  EXPECT_EQ(seen, pushed);
}
834 
TEST(PrefixedEntryRingBufferMulti, IteratorValidEntriesWithPreamble) {
  // Same as IteratorValidEntries, but with user-preamble support enabled:
  // each entry also records a preamble byte that the iterator must expose.
  PrefixedEntryRingBufferMulti ring(true);
  byte test_buffer[kTestBufferSize];
  EXPECT_EQ(ring.SetBuffer(test_buffer), OkStatus());

  PrefixedEntryRingBufferMulti::Reader reader;
  EXPECT_EQ(ring.AttachReader(reader), OkStatus());

  // Push incrementing values, using the same value as the preamble, until
  // the buffer is full. State: [VALID|VALID|VALID|INVALID].
  size_t pushed = 0;
  for (; TryPushBack<size_t>(ring, pushed, pushed).ok(); ++pushed) {
  }

  // Each iterated entry must yield both the payload and the preamble that
  // were written together.
  size_t seen = 0;
  for (const Entry& entry : ring) {
    EXPECT_EQ(GetEntry<size_t>(entry.buffer), seen);
    EXPECT_EQ(entry.preamble, seen);
    ++seen;
  }
  EXPECT_EQ(seen, pushed);
}
862 
TEST(PrefixedEntryRingBufferMulti, IteratorStaleEntries) {
  // Exercises iteration when every entry has been read: the reader's count is
  // zero, but the bytes remain in the buffer and a trailing (never-popped)
  // reader still anchors them. State: [STALE|STALE|STALE].
  PrefixedEntryRingBufferMulti ring;
  byte test_buffer[kTestBufferSize];
  EXPECT_EQ(ring.SetBuffer(test_buffer), OkStatus());

  // The trailing reader never pops, keeping the stale entries reachable.
  PrefixedEntryRingBufferMulti::Reader trailing_reader;
  EXPECT_EQ(ring.AttachReader(trailing_reader), OkStatus());

  PrefixedEntryRingBufferMulti::Reader active_reader;
  EXPECT_EQ(ring.AttachReader(active_reader), OkStatus());

  // Fill the buffer with incrementing values, then drain the active reader.
  size_t pushed = 0;
  for (; TryPushBack<size_t>(ring, pushed).ok(); ++pushed) {
  }
  while (active_reader.PopFront().ok()) {
  }

  // Iteration must still visit every stale entry, in push order.
  size_t seen = 0;
  for (const Entry& entry : ring) {
    EXPECT_EQ(GetEntry<size_t>(entry.buffer), seen);
    ++seen;
  }
  EXPECT_EQ(seen, pushed);
}
895 
TEST(PrefixedEntryRingBufferMulti, IteratorValidStaleEntries) {
  // Exercises iteration over a mix of stale (already-read) and valid
  // (unread) entries, e.g. [VALID|INVALID|STALE|STALE]: fill the buffer,
  // then pop only some entries from one reader.
  PrefixedEntryRingBufferMulti ring;
  byte test_buffer[kTestBufferSize];
  EXPECT_EQ(ring.SetBuffer(test_buffer), OkStatus());

  // The trailing reader never pops, so stale entries stay reachable.
  PrefixedEntryRingBufferMulti::Reader trailing_reader;
  EXPECT_EQ(ring.AttachReader(trailing_reader), OkStatus());

  PrefixedEntryRingBufferMulti::Reader active_reader;
  EXPECT_EQ(ring.AttachReader(active_reader), OkStatus());

  // Fill the buffer with incrementing values.
  size_t pushed = 0;
  for (; TryPushBack<size_t>(ring, pushed).ok(); ++pushed) {
  }

  // Drain roughly half of the entries from the active reader.
  while (active_reader.EntryCount() > (pushed / 2)) {
    EXPECT_TRUE(active_reader.PopFront().ok());
  }

  // Iteration starts from the oldest reader, so every entry — stale and
  // valid alike — must be visited in push order.
  size_t seen = 0;
  for (const Entry& entry : ring) {
    EXPECT_EQ(GetEntry<size_t>(entry.buffer), seen);
    ++seen;
  }
  EXPECT_EQ(seen, pushed);
}
929 
TEST(PrefixedEntryRingBufferMulti, IteratorBufferCorruption) {
  // Verifies that iteration over a partially corrupted buffer yields the
  // still-valid leading entries and then stops with Status::DataLoss(),
  // rather than crashing or returning garbage.
  PrefixedEntryRingBufferMulti ring;
  byte test_buffer[kTestBufferSize];
  EXPECT_EQ(ring.SetBuffer(test_buffer), OkStatus());

  // Buffer contains partially written entries. This may happen if writing
  // is pre-empted (e.g. a crash occurs). In this state, we expect a series
  // of valid entries followed by an invalid entry.
  PrefixedEntryRingBufferMulti::Reader trailing_reader;
  EXPECT_EQ(ring.AttachReader(trailing_reader), OkStatus());

  // Add one entry to capture the second entry index.
  // (All entries hold a size_t payload, so TotalUsedBytes() after one push
  // is the per-entry size used for offset math below.)
  size_t entry_count = 0;
  EXPECT_TRUE(TryPushBack<size_t>(ring, entry_count++).ok());
  size_t entry_size = ring.TotalUsedBytes();

  // Fill the buffer with entries.
  while (TryPushBack<size_t>(ring, entry_count++).ok()) {
  }

  // Push another entry to move the write index forward and force the oldest
  // reader forward. This will require the iterator to dering.
  EXPECT_TRUE(PushBack<size_t>(ring, 0).ok());
  EXPECT_TRUE(ring.CheckForCorruption().ok());

  // The first entry is overwritten. Corrupt all data past the fifth entry.
  // Note that because the first entry has shifted, the entry_count recorded
  // in each entry is shifted by 1.
  constexpr size_t valid_entries = 5;
  size_t offset = valid_entries * entry_size;
  // 0xFF bytes do not form a valid entry preamble, so everything from
  // `offset` onward is unreadable.
  memset(test_buffer + offset, 0xFF, kTestBufferSize - offset);
  EXPECT_FALSE(ring.CheckForCorruption().ok());

  // Iterate over all entries and confirm entry count.
  // Payloads are offset by 1 because the original first entry (value 0) was
  // overwritten by the extra PushBack above.
  size_t validated_entries = 0;
  iterator it = ring.begin();
  for (; it != ring.end(); it++) {
    EXPECT_EQ(GetEntry<size_t>(it->buffer), validated_entries + 1);
    validated_entries++;
  }
  // The final entry will fail to be read.
  EXPECT_EQ(it.status(), Status::DataLoss());
  EXPECT_EQ(validated_entries, valid_entries);
}
974 
975 }  // namespace
976 }  // namespace ring_buffer
977 }  // namespace pw
978